Browse source code

change:
1. Optimize the platform startup script; optimize the https configuration file logic

kindring 2 years ago
Parent
Commit
706e0c9aa6
43 changed files with 224 additions and 3319 deletions
  1. +0 -23  docker/config.cfg
  2. +0 -26  docker/gbDocker/Dockerfile
  3. +0 -111  docker/gbDocker/application_product.yml
  4. +0 -119  docker/gbDocker/createConfig.sh
  5. +0 -18  docker/gbDocker/gb28181/keys/localhost_cert.pem
  6. +0 -28  docker/gbDocker/gb28181/keys/localhost_key.pem
  7. +0 -121  docker/gbDocker/install.sh
  8. +0 -20  docker/gbDocker/reConfigGB.sh
  9. +0 -3  docker/gbDocker/restart.sh
  10. +0 -1  docker/gbDocker/run.sh
  11. +0 -2  docker/installDocker.sh
  12. +0 -15  docker/mysqlDocker/Dockerfile
  13. +0 -34  docker/mysqlDocker/base.cnf
  14. +0 -1  docker/mysqlDocker/docker
  15. +0 -489  docker/mysqlDocker/init.sql
  16. +0 -4  docker/mysqlDocker/initDocker.sh
  17. +0 -150  docker/mysqlDocker/install.sh
  18. +0 -8  docker/mysqlDocker/installMysql.sh
  19. +0 -35  docker/mysqlDocker/my.cnf
  20. +0 -3  docker/mysqlDocker/restart.sh
  21. +0 -1  docker/mysqlDocker/run.sh
  22. +0 -8  docker/reConfig.sh
  23. +0 -13  docker/redisDocker/Dockerfile
  24. +0 -17  docker/redisDocker/docker-compse.yml
  25. +0 -123  docker/redisDocker/install.sh
  26. BIN  docker/redisDocker/redis-7.0.7.tar.gz
  27. +0 -1254  docker/redisDocker/redis.conf
  28. +0 -3  docker/redisDocker/restart.sh
  29. +0 -1  docker/redisDocker/run_redis.sh
  30. +0 -19  docker/restartServer.sh
  31. +0 -57  docker/test.sh
  32. +0 -56  docker/zlmDocker/Dockerfile
  33. +0 -159  docker/zlmDocker/config.ini
  34. +0 -116  docker/zlmDocker/install.sh
  35. +3 -0  package/config.cfg
  36. +6 -2  package/startDockerImage.sh
  37. +0 -0  package/updateServer.sh
  38. +7 -2  src/main/java/com/genersoft/iot/vmp/gb28181/transmit/event/request/impl/message/response/cmd/PresetQueryResponseMessageHandler.java
  39. +14 -5  src/main/java/com/genersoft/iot/vmp/vmanager/gb28181/ptz/PtzController.java
  40. +92 -236  web_src/package-lock.json
  41. +1 -0  web_src/package.json
  42. +2 -0  web_src/src/components/GBRecordDetail.vue
  43. +99 -36  web_src/src/components/common/ptzControl.vue

+ 0 - 23
docker/config.cfg

@@ -1,23 +0,0 @@
-# 配置国标平台所需启动参数
-# redis 相关配置
-redis_host=0.0.0.0
-redis_port=7654
-redis_db=6
-redis_passwd="hfyredis28181"
-# mysql 相关配置
-mysql_host=0.0.0.0
-mysql_port=6543
-mysql_user="hfygb"
-mysql_passwd="hfygb28181"
-mysql_db="gb_db"
-# 国标平台相关配置参数
-# 是否启用https
-gb_enableHttps=false
-# https证书地址
-gb_WebPort=29001
-gb_SIPPort=29000
-# 国标sip地址
-gb_Host="192.168.1.203"
-gb_domain=3402000000
-gb_id=34020000002000000001
-gb_password=12345678
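
The removed docker/config.cfg is a plain shell-style key=value file, so a startup script can load it with source and then validate the values it needs. A minimal sketch of that pattern (the validation loop and the variable list are illustrative, not code from this repository):

    #!/bin/bash -e
    # Load the key=value startup parameters (redis_*, mysql_*, gb_*) into the environment.
    source ./config.cfg
    # Fail early if a required setting is empty; the names come from config.cfg above.
    for var in redis_host redis_port mysql_host mysql_port gb_WebPort gb_SIPPort; do
        if [ -z "${!var}" ]; then
            echo "missing required setting: $var" >&2
            exit 1
        fi
    done
    echo "GB web port: $gb_WebPort, SIP port: $gb_SIPPort, https enabled: $gb_enableHttps"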

+ 0 - 26
docker/gbDocker/Dockerfile

@@ -1,26 +0,0 @@
-FROM ubuntu:20.04
-MAINTAINER kindring
-VOLUME "/data"
-ENV  LANG="en_US.utf8"
-# 国标信令端口 tcp与udp同事放行
-EXPOSE 29000/tcp
-EXPOSE 29000/udp
-# web控制面板端口
-EXPOSE 29001/tcp
-#ENV LC_ALL zh_CN.UTF-8
-
-RUN mkdir /usr/local/jvm/
-# 解压配置java环境
-COPY ./jdk-8-linux-x64.tar.gz ./
-RUN tar -xvzf jdk-8-linux-x64.tar.gz -C /usr/local/jvm
-ENV JAVA_HOME /usr/local/jvm/jdk1.8.0_341
-ENV JRE_HOME ${JAVA_HOME}/jre
-ENV CLASSPATH .:${JAVA_HOME}/lib:${JRE_HOME}/lib
-ENV PATH ${JAVA_HOME}/bin:$PATH
-# 配置智慧国标平台
-#COPY ./gb28181 /data/hfygb
-# 拷贝配置文件
-#COPY ./application_product.yml /data/hfygb/config
-WORKDIR /data/hfygb
-CMD java -jar ./target/hfy-gb.jar  --server.address=0.0.0.0 --spring.config.location=/data/hfygb/config/application_product.yml
-

+ 0 - 111
docker/gbDocker/application_product.yml

@@ -1,111 +0,0 @@
-#hfy 国标平台配置文件
-spring:
-  redis:
-    #[必须修改] Redis服务器IP, REDIS安装在本机的,使用127.0.0.1:
-    host: 127.0.0.1
-    port: 7654
-    database: 6
-    password: hfyredis28181
-    timeout: 10000
-    #[可选] 一个pool最多可分配多少个jedis实例
-    poolMaxTotal: 1000
-    #[可选] 一个pool最多有多少个状态为idle(空闲)的jedis实例
-    poolMaxIdle: 500
-    #[可选] 最大的等待时间(秒)
-    poolMaxWait: 5
-  #[必选] jdbc数据库配置
-  datasource:
-    type: com.alibaba.druid.pool.DruidDataSource
-    driver-class-name: com.mysql.cj.jdbc.Driver
-    url: jdbc:mysql://127.0.0.1:6543/gb_db?useUnicode=true&characterEncoding=UTF8&rewriteBatchedStatements=true&serverTimezone=PRC&useSSL=true&allowMultiQueries=true&AllowPublicKeyRetrieval=true
-    username: hfygb
-    password: hfygb28181
-    druid:
-      initialSize: 10                       # 连接池初始化连接数
-      minIdle: 5                            # 连接池最小空闲连接数
-      maxWait: 60000                        # 获取连接时最大等待时间,单位毫秒。配置了maxWait之后,缺省启用公平锁,并发效率会有所下降,如果需要可以通过配置useUnfairLock属性为true使用非公平锁。
-      keepAlive: true                       # 连接池中的minIdle数量以内的连接,空闲时间超过minEvictableIdleTimeMillis,则会执行keepAlive操作。
-      validationQuery: select 1             # 检测连接是否有效sql,要求是查询语句,常用select 'x'。如果validationQuery为null,testOnBorrow、testOnReturn、testWhileIdle都不会起作用。
-      testWhileIdle: true                   # 建议配置为true,不影响性能,并且保证安全性。申请连接的时候检测,如果空闲时间大于timeBetweenEvictionRunsMillis,执行validationQuery检测连接是否有效。
-      testOnBorrow: false                   # 申请连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。
-      testOnReturn: false                   # 归还连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。
-      poolPreparedStatements: false         # 是否開啟PSCache,並且指定每個連線上PSCache的大小
-      timeBetweenEvictionRunsMillis: 60000  # 配置間隔多久才進行一次檢測,檢測需要關閉的空閒連線,單位是毫秒
-      minEvictableIdleTimeMillis: 300000    # 配置一個連線在池中最小生存的時間,單位是毫秒
-      filters: stat,slf4j                   # 配置监控统计拦截的filters,监控统计用的filter:sta, 日志用的filter:log4j
-      useGlobalDataSourceStat: true         # 合并多个DruidDataSource的监控数据
-      connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=1000
-      maxActive: 200                        # 连接池最大连接数
-server:
-  address: 0.0.0.0
-  # 国标平台web端口
-  port: 29001
-  # [可选] HTTPS配置, 默认不开启
-  ssl:
-    # [可选] 是否开启HTTPS访问
-    enabled: false
-    # [可选] 证书文件路径,放置在resource/目录下即可,修改xxx为文件名
-    key-store: classpath:xxx.jks
-    # [可选] 证书密码
-    key-store-password: password
-    # [可选] 证书类型, 默认为jks,根据实际修改
-    key-store-type: JKS
-    # 配置证书可以使用如下两项,如上面二选一即可
-    # PEM 编码证书
-    certificate: xx.pem
-    # 私钥文件
-    # certificate-private-key: xx.key
-sip:
-  # [必须修改] 本机的IP, 必须是网卡上的IP,用于sip下协议栈监听ip,如果监听所有设置为0.0.0.0
-  monitor-ip: 127.0.0.1
-  # [必须修改] 本机的IP,zlm需要可与此ip通信
-  ip: 0.0.0.0
-  # [必须修改] 国标协议sip信令端口
-  port: 29000
-  # 根据国标协议6.1.2的规则制定的 10位数
-  domain: 3402000000
-  # 平台id
-  id: 34020000002000000001
-  # sip认证密码
-  password: 12345678
-  # 设备心跳超时时间
-  keepalive-timeout: 255
-  # [可选] 国标级联注册失败,再次发起注册的时间间隔。 默认60秒
-  register-time-interval: 60
-  # [可选] 云台控制速度
-  ptz-speed: 50
-  # 是否存储alarm信息
-  alarm: false
-  # hfy ai图片存储位置
-  mediaPath: mFile
-media:
-  # zlm服务器唯一id,用于触发hook时区别是哪台服务器,general.mediaServerId
-  id: your_server_id
-  # zlm服务器的IP
-  ip: szgpay.ticp.net
-  #  wvp在国标信令中使用的ip,此ip为摄像机可以访问到的ip, 置空使用 media.ip
-  sdp-ip: szgpay.ticp.net
-  # zlm服务器的hook所使用的IP, 默认使用sip.ip
-  hook-ip: szgpay.ticp.net
-  # zlm服务器的http.port
-  http-port: 15070
-  # [可选] 是否自动配置ZLM, 如果希望手动配置ZLM, 可以设为false, 不建议新接触的用户修改
-  auto-config: true
-  # zlm服务器的hook.admin_params=secret
-  secret: 
-  # [可选] zlm服务器的http.sslport, 置空使用zlm配置文件配置
-  http-ssl-port:
-  rtmp-port:
-  rtmp-ssl-port:
-  rtp-proxy-port:
-  rtsp-port:
-  rtsp-ssl-port:
-  # [可选] zlm服务器的general.streamNoneReaderDelayMS
-  stream-none-reader-delay-ms:  60000
-  rtp:
-    # 是否启用多端口模式, 开启后会在portRange范围内选择端口用于媒体流传输,建议q
-    enable: true
-    # 端口范围
-    port-range: 35000,40000
-    # 发送给设备所使用的端口范围
-    send-port-range: 35000,40000
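
The server.ssl block above accepts either a JKS keystore or a PEM certificate/key pair (the PEM files appear later in this commit under gb28181/keys). If a JKS keystore is preferred, the PEM pair can be converted with standard openssl and keytool commands; the file names, alias, and password below are illustrative only:

    # Bundle the PEM certificate and private key into a PKCS#12 archive.
    openssl pkcs12 -export -in localhost_cert.pem -inkey localhost_key.pem \
        -name gb28181 -out gb28181.p12 -passout pass:password
    # Convert the PKCS#12 archive into the JKS keystore referenced by key-store:.
    keytool -importkeystore -srckeystore gb28181.p12 -srcstoretype PKCS12 -srcstorepass password \
        -destkeystore gb28181.jks -deststoretype JKS -deststorepass password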

+ 0 - 119
docker/gbDocker/createConfig.sh

@@ -1,119 +0,0 @@
-#!/bin/bash -e
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-p=$SCRIPT_DIR"/application_product.yml"
-t1="  "
-t2="    "
-t3="      "
-GBWebPort=$gb_WebPort
-GBSipPort=$gb_SIPPort
-GBExternalHost=$gb_Host
-GBSipDomain=$gb_domain
-GBSipID=$gb_id
-GBSipPASSWD=$gb_password
-GBExternalHost=$gb_externalHost
-GBEnableHttps=$gb_enableHttps
-#mediaSecret=$media_Secret
-
-echo "GBWebPort"
-finalMysqlHost=$mysqlHost;
-finalRedisHost=$redisHost;
-if [ "$redisHost" = "0.0.0.0" ];then
-  finalRedisHost="127.0.0.1";
-fi
-if [ "$mysqlHost" = "0.0.0.0" ];then
-  finalMysqlHost="127.0.0.1";
-fi
-echo "#hfy 国标平台配置文件" > "$p"
-echo "spring:" >> "$p"
-echo "${t1}servlet:" >> "$p"
-echo "${t2}multipart:" >> "$p"
-echo "${t3}max-file-size: 10MB" >> "$p"
-echo "${t3}max-request-size: 100MB" >> "$p"
-#  redis 配置项
-echo "${t1}redis:" >> "$p"
-echo "${t2}#[必须修改] Redis服务器IP, REDIS安装在本机的,使用127.0.0.1:" >> "$p"
-echo "${t2}host: $finalRedisHost" >> "$p"
-echo "${t2}port: $redisPort" >> "$p"
-echo "${t2}database: $redisDB" >> "$p"
-echo "${t2}password: $redisPasswd" >> "$p"
-echo "${t2}timeout: 10000" >> "$p"
-echo "${t2}#[可选] 一个pool最多可分配多少个jedis实例" >> "$p"
-echo "${t2}poolMaxTotal: 1000" >> "$p"
-echo "${t2}#[可选] 一个pool最多有多少个状态为idle(空闲)的jedis实例" >> "$p"
-echo "${t2}poolMaxIdle: 500" >> "$p"
-echo "${t2}#[可选] 最大的等待时间(秒)" >> "$p"
-echo "${t2}poolMaxWait: 7" >> "$p"
-
-# [必选] jdbc数据库配置
-#  redis 配置项
-echo "${t1}#[必选] jdbc数据库配置" >> "$p"
-echo "${t1}datasource:" >> "$p"
-echo "${t2}type: com.alibaba.druid.pool.DruidDataSource" >> "$p"
-echo "${t2}driver-class-name: com.mysql.cj.jdbc.Driver" >> "$p"
-echo "${t2}url: jdbc:mysql://$finalMysqlHost:$mysqlPort/$mysqlDB?useUnicode=true&characterEncoding=UTF8&rewriteBatchedStatements=true&serverTimezone=PRC&useSSL=true&allowMultiQueries=true&AllowPublicKeyRetrieval=true" >> "$p"
-echo "${t2}username: $mysqlUser" >> "$p"
-echo "${t2}password: $mysqlPasswd" >> "$p"
-echo "${t2}druid:" >> "$p"
-echo "${t3}initialSize: 10                       # 连接池初始化连接数" >> "$p"
-echo "${t3}minIdle: 5                            # 连接池最小空闲连接数" >> "$p"
-echo "${t3}maxWait: 60000                        # 获取连接时最大等待时间,单位毫秒。配置了maxWait之后,缺省启用公平锁,并发效率会有所下降,如果需要可以通过配置useUnfairLock属性为true使用非公平锁。" >> "$p"
-echo "${t3}keepAlive: true                       # 连接池中的minIdle数量以内的连接,空闲时间超过minEvictableIdleTimeMillis,则会执行keepAlive操作。" >> "$p"
-echo "${t3}validationQuery: select 1             # 检测连接是否有效sql,要求是查询语句,常用select 'x'。如果validationQuery为null,testOnBorrow、testOnReturn、testWhileIdle都不会起作用。" >> "$p"
-echo "${t3}testWhileIdle: true                   # 建议配置为true,不影响性能,并且保证安全性。申请连接的时候检测,如果空闲时间大于timeBetweenEvictionRunsMillis,执行validationQuery检测连接是否有效。" >> "$p"
-echo "${t3}testOnBorrow: false                   # 申请连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。" >> "$p"
-echo "${t3}testOnReturn: false                   # 归还连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。" >> "$p"
-echo "${t3}poolPreparedStatements: false         # 是否開啟PSCache,並且指定每個連線上PSCache的大小" >> "$p"
-echo "${t3}timeBetweenEvictionRunsMillis: 60000  # 配置間隔多久才進行一次檢測,檢測需要關閉的空閒連線,單位是毫秒" >> "$p"
-echo "${t3}minEvictableIdleTimeMillis: 300000    # 配置一個連線在池中最小生存的時間,單位是毫秒" >> "$p"
-echo "${t3}filters: stat,slf4j                   # 配置监控统计拦截的filters,监控统计用的filter:sta, 日志用的filter:log4j" >> "$p"
-echo "${t3}useGlobalDataSourceStat: true         # 合并多个DruidDataSource的监控数据" >> "$p"
-echo "${t3}connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=1000" >> "$p"
-echo "${t3}maxActive: 200                        # 连接池最大连接数" >> "$p"
-
-echo "server:" >> "$p"
-echo "${t1}address: 0.0.0.0" >> "$p"
-echo "${t1}# 国标平台web端口" >> "$p"
-echo "${t1}port: $gb_WebPort" >> "$p"
-echo "${t1}# [可选] HTTPS配置, 默认不开启" >> "$p"
-echo "${t1}ssl:" >> "$p"
-echo "${t2}# [可选] 是否开启HTTPS访问" >> "$p"
-echo "${t2}enabled: $GBEnableHttps" >> "$p"
-echo "${t2}# [可选] 证书文件路径,放置在resource/目录下即可,修改xxx为文件名" >> "$p"
-echo "${t2}key-store: classpath:xxx.jks" >> "$p"
-echo "${t2}# [可选] 证书密码" >> "$p"
-echo "${t2}key-store-password: password" >> "$p"
-echo "${t2}# [可选] 证书类型, 默认为jks,根据实际修改" >> "$p"
-echo "${t2}key-store-type: JKS" >> "$p"
-echo "${t2}# 配置证书可以使用如下两项,如上面二选一即可" >> "$p"
-echo "${t2}# PEM 编码证书" >> "$p"
-echo "${t2}certificate: keys/cert.pem" >> "$p"
-echo "${t2}# 私钥文件" >> "$p"
-echo "${t2}certificate-private-key: keys/key.pem" >> "$p"
-
-echo "sip:" >> "$p"
-echo "${t1}# [必须修改] 本机的IP, 必须是网卡上的IP,用于sip下协议栈监听ip,如果监听所有设置为0.0.0.0" >> "$p"
-echo "${t1}monitor-ip: 0.0.0.0" >> "$p"
-echo "${t1}# [必须修改] 本机的IP,zlm需要可与此ip通信" >> "$p"
-echo "${t1}ip: $GBExternalHost" >> "$p"
-echo "${t1}# [必须修改] 国标协议sip信令端口" >> "$p"
-echo "${t1}port: $GBSipPort" >> "$p"
-echo "${t1}# 根据国标协议6.1.2的规则制定的 10位数" >> "$p"
-echo "${t1}domain: $GBSipDomain" >> "$p"
-echo "${t1}# 平台id" >> "$p"
-echo "${t1}id: $GBSipID" >> "$p"
-echo "${t1}# sip认证密码" >> "$p"
-echo "${t1}password: $GBSipPASSWD" >> "$p"
-echo "${t1}# 设备心跳超时时间" >> "$p"
-echo "${t1}keepalive-timeout: 255" >> "$p"
-echo "${t1}# [可选] 国标级联注册失败,再次发起注册的时间间隔。 默认60秒" >> "$p"
-echo "${t1}register-time-interval: 60" >> "$p"
-echo "${t1}# [可选] 云台控制速度" >> "$p"
-echo "${t1}ptz-speed: 50" >> "$p"
-echo "${t1}# 是否存储alarm信息" >> "$p"
-echo "${t1}alarm: false" >> "$p"
-echo "${t1}# hfy ai图片存储位置" >> "$p"
-echo "${t1}mediaPath: mFile" >> "$p"
-echo "# [可选] 日志配置, 一般不需要改" >> "$p"
-echo "logging:" >> "$p"
-echo "${t1}config: classpath:logback-spring-local.xml" >> "$p"
-
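
createConfig.sh always writes the ssl block with enabled: $GBEnableHttps and fixed key paths. The "https configuration file logic" named in the commit message could instead emit the certificate lines only when https is requested; a sketch of that approach, not the code this commit actually added:

    echo "${t1}ssl:" >> "$p"
    if [ "$GBEnableHttps" = "true" ]; then
        echo "${t2}enabled: true" >> "$p"
        echo "${t2}# PEM certificate and private key mounted under gb28181/keys" >> "$p"
        echo "${t2}certificate: keys/cert.pem" >> "$p"
        echo "${t2}certificate-private-key: keys/key.pem" >> "$p"
    else
        echo "${t2}enabled: false" >> "$p"
    fi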

+ 0 - 18
docker/gbDocker/gb28181/keys/localhost_cert.pem

@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC/DCCAeSgAwIBAgIRANu6zHtr18s6RTaFm7zzJ5AwDQYJKoZIhvcNAQELBQAw
-EzERMA8GA1UEChMITGl2ZVFpbmcwHhcNMjIwNzA4MDY1NDU1WhcNMjMwNzA4MDY1
-NDU1WjATMREwDwYDVQQKEwhMaXZlUWluZzCCASIwDQYJKoZIhvcNAQEBBQADggEP
-ADCCAQoCggEBALe7hxQ+A7hmFHvdop+39pHu5UUNtHG3KjJJAbSvB81hDNa46jYv
-/g1SMhqsl6NXvDop4RbOd1tdeftZNp6xrGp/b687J/stZs1biXaqPhQymCN/qssK
-d+FW7dQPrRMkgo6o2ZUR5/P/3cryoNT9cWb7+fVlls8o09U0B1PTvkt9/TACqIU1
-6VJiu8jLFnGfFrHBVgKKTL+bpeYwsqeDY5tYhmlosbK3I2pI88iw4DTbgfMCQR/u
-GQGGLI2Aa1T/LwdrpAsHrWNZvTjavZssAPWy0ET4bRMgq0KUkAO1ll625ez6pzM4
-UCYSCADnOMEeNnT4ZGMbX9mb+jbWiosE8X0CAwEAAaNLMEkwDgYDVR0PAQH/BAQD
-AgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwFAYDVR0RBA0w
-C4IJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBTSOZNLdBGrVcSiouDgxke
-ic5bkJUP4RHWXptsC3ZVejbv86u4Nv0pAAhJ3/zQBz7rzUSnr7K/x/EsK6OfLi5u
-kiyF7KE8YYW2UdqVY6LIQJz2UzQ5oqqh+iXSyYnBH7mazkOVUhL6vq0bYWwps6hs
-mTu93uGgPYHhVu3yD9D7KbHoRO+MTKyqLM+GkGKnV0vYuGnC8B2GEIFx3cdgGFIt
-olHCH6d3vhy5O84UWEw19JO62ET0AhrOgObxJjyMt8Qcr7WzIke5LwyHKL7xBI40
-KP1e4nk9hHDozj1rqRW/vKtOtuJrZnjMyvxe7Q02n+KG06sTiDYdTbNugQrpfF9R
------END CERTIFICATE-----

+ 0 - 28
docker/gbDocker/gb28181/keys/localhost_key.pem

@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3u4cUPgO4ZhR7
-3aKft/aR7uVFDbRxtyoySQG0rwfNYQzWuOo2L/4NUjIarJejV7w6KeEWzndbXXn7
-WTaesaxqf2+vOyf7LWbNW4l2qj4UMpgjf6rLCnfhVu3UD60TJIKOqNmVEefz/93K
-8qDU/XFm+/n1ZZbPKNPVNAdT075Lff0wAqiFNelSYrvIyxZxnxaxwVYCiky/m6Xm
-MLKng2ObWIZpaLGytyNqSPPIsOA024HzAkEf7hkBhiyNgGtU/y8Ha6QLB61jWb04
-2r2bLAD1stBE+G0TIKtClJADtZZetuXs+qczOFAmEggA5zjBHjZ0+GRjG1/Zm/o2
-1oqLBPF9AgMBAAECggEANLM3ZZj8HDI7gePqt4Fe2jD51jq+HTf6hH3oXVkvFbvW
-oOhdVbmiV1PSR1BmFlYeCVjl1VLXbX98c5n9TruLxuhSsqJ3L1awfArbfQwLzmNM
-vDHsXVzdUedLRF5SuIDqad0Iu/ds3tHvXj0F4NHpay6vXbIL9dOyHx9TUroThTdb
-l/IUsXFrt1JoWoeRR9ABqLIqTPgcPrJgas5y3n9d9C1WPfUvNIgOZnjRgZQ8rufC
-2x31MQx8VB1L0XVnAMrcr+vQcLR03+E5LTQR14Zt7Minu+NdCn7S6982dzwrKM1D
-nss6unuMG8DmCjqJY2oVeMAJwcbFRH/Ee2D228U4AQKBgQDQAPPynmTh0l9kH2n6
-r3+0JNF1rt+InB3xP6N9+smUGDxsYUThbBkbIGndCecmRbQ6PwcLMaQkiDa6RddS
-3T32+/w71stwEpM0idVDYGf66rJr3PJbtxEFes6L2g5Zfif2nOcDpdIKnnCnc653
-Y23FY1F0oQUR6g9NH8QJLxnvAQKBgQDiINgeeIFfaSjaJBwsoFvGxd1XchzDclxa
-PC32bOgNgnc7FG57PvlKaaPuNhbblV/avksCvShoVOZQD19U52ab1ITIesRLM6hs
-Nxt/du2tJv5Cu+z/RrWOQ/Hj73B1fVQR7h8A0MepsIbL9XMaIb924uSYFcB3dGyo
-zF5wTHk+fQKBgA98Mq0aS6pgLgfXKt2l2R0BLLLVLQEPlk8SjMphNEaZM05iQ22A
-zbGuTRNOnG3F/K/lZ1HXBUBO/7wIXQpnPCi6A8I0sHqMd3cO19inQbZ1aIkNLuHT
-FCpTRTM2LaFN+g2rCqvD2Yo+12Yv/4oV1eM/DJ5hKm51/WOhfMeuQQEBAoGBAKuo
-LyeCZwNcuuxNuS2PVdwg3BPDxl3/9Gdzg5irt8R5o7Uvn2bjuS5p3023tcZfMHLH
-Pf4IRiJdQLDJ2p6DQvtMargdqeFEh7gOZUtvpqFpFl7mddSbgrtKjNdxLjeiOi2a
-S+K8mrUQEmPGIehWchf3yXxCFucXdFz3N0Gca/WpAoGAarvoLU3WUXTH1UC/0HrC
-hIKZQTfYNCclL+L15wsgEPEJOsyQ+JQs742lRD11Tu0Uzn4+cZjtX7Pfoxwkwq01
-G1o3/zvNQ0cXNLBETk+JcUkKIfiTRHLGsEqKYoiHFhoxGe3LzxFKqPtwUEaezXoX
-RIpukrApa0gnKztvhAJ2xGE=
------END PRIVATE KEY-----

+ 0 - 121
docker/gbDocker/install.sh

@@ -1,121 +0,0 @@
-#!/bin/bash -e
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-
-
-#获取国标参数
-function getGbConfig(){
-  _useConfig=$1
-  w1=1
-  while [[ $w1 == 1 ]]
-  do
-    clear
-    if [  "$_useConfig" == "1" ]; then
-      GBWebPort=$gb_WebPort
-      GBSipPort=$gb_SIPPort
-      GBExternalHost=$gb_Host
-      GBSipDomain=$gb_domain
-      GBSipID=$gb_id
-      GBSipPASSWD=$gb_password
-      GBEnableHttps=$gb_enableHttps
-      w1=2
-    else
-      echo "开始安装配置国标平台,请按照提示输入信息"
-      _GBWebPort=29001
-      _GBSipPort=29000
-      _GBExternalHost="192.168.1.26"
-      _GBSipDomain=3402000000
-      _GBSipID=34020000002000000001
-      _GBSipPASSWD=12345678
-      _GBEnableHttps=false
-      read -p "请输入国标平台所使用的端口($_GBWebPort): " GBWebPort
-      GBWebPort=${GBWebPort:-$_GBWebPort}
-      read -p "请输入sip端口($_GBSipPort): " GBSipPort
-      GBSipPort=${GBSipPort:-_GBSipPort}
-      read -p "请输入可被zlm的ip地址($_GBExternalHost): " GBExternalHost
-      GBExternalHost=${GBExternalHost:-_GBExternalHost}
-      read -p "请输入国标域($_GBSipDomain): " GBSipDomain
-      GBSipDomain=${GBSipDomain:-_GBDomain}
-      read -p "请输入国标平台id($_GBSipID): " GBSipID
-      GBSipID=${GBSipID:-_GBSipID}
-      read -p "请输入国标平台密码$_GBSipPASSWD): " GBSipPASSWD
-      GBSipPASSWD=${GBSipPASSWD:-_GBSipPASSWD}
-      w3=1
-      while [[ $w3 == 1 ]]
-      do
-        echo "是否启用https? y/n 语音对讲功能必须使用https,密钥文件放置于 gb28181/keys"
-        read _yn
-        if [[ $_yn == "y" ]] || [[ $_yn == "yes" ]] || [[ $_yn == "Y" ]] || [[ $_yn == "YES" ]] ; then
-          echo "启用";
-          GBEnableHttps=true
-          w3=2;
-          break;
-        elif [[ $_yn == "n" ]] || [[ $_yn == "n" ]] || [[ $_yn == "N" ]] || [[ $_yn == "NO" ]] ; then
-          echo "不启用"
-          GBEnableHttps=false
-          w2=2;
-        else
-          echo "---------------------"
-        fi
-      done
-
-      w2=1
-      while [[ $w2 == 1 ]]
-      do
-        echo "
-                国标平台服务器信息
-                web端口=$GBWebPort
-                sip端口=$GBSipPort
-                访问地址=$GBExternalHost
-                国标域=$GBSipDomain
-                国标平台id=$GBSipID
-                国标密码=$GBSipPASSWD
-                启用https=$GBEnableHttps
-                "
-        echo "是否确认为此信息?y/n"
-        read _yn
-        if [[ $_yn == "y" ]] || [[ $_yn == "yes" ]] || [[ $_yn == "Y" ]] || [[ $_yn == "YES" ]] ; then
-          echo "确认信息";
-          w1=2;
-          break;
-        elif [[ $_yn == "n" ]] || [[ $_yn == "n" ]] || [[ $_yn == "N" ]] || [[ $_yn == "NO" ]] ; then
-          echo "重新输入"
-          w2=2;
-        else
-          echo "---------------------"
-        fi
-      done
-    fi
-  done
-}
-
-# 输入参数
-installMode=$1
-installMode=${installMode:-2}
-cd $SCRIPT_DIR
-getGbConfig "$installMode"
-echo "
-                国标平台服务器信息
-                web端口=$GBWebPort
-                sip端口=$GBSipPort
-                访问地址=$GBExternalHost
-                国标域=$GBSipDomain
-                国标平台id=$GBSipID
-                国标密码=$GBSipPASSWD
-                启用https=$GBEnableHttps
-                "
-# 开始编辑 gb 配置
-source $SCRIPT_DIR"/createConfig.sh"
-
-# 开始执行命令
-docker build -t hfy_gb:latest .
-docker stop hfy_gp
-docker rm hfy_gp
-#echo "$GBWebPort":"$GBWebPort" -p "$GBSipPort":"$GBSipPort" -p "$rtpPortStart"-"$rtpPortEnd":"$rtpPortStart"-"$rtpPortEnd"
-sudo docker run -itd --net=host  \
- -v /data/gb:/data/gb -e LANG=C.UTF-8 \
- -v $SCRIPT_DIR"/gb28181":/data/hfygb \
- -v $SCRIPT_DIR"/application_product.yml":/data/hfygb/config/"application_product.yml" --name hfy_gp -it hfy_gb:latest
-
-
- # 导出镜像
-
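
The removed install.sh drives its yes/no questions with hand-rolled while loops, and the "no" branch tests $_yn == "n" twice instead of covering the other spellings. A single reusable prompt helper would cover both places; this is a sketch, not part of the commit:

    # Hypothetical helper: ask a yes/no question, return 0 for yes and 1 for no.
    confirm() {
        local answer
        while true; do
            read -p "$1 (y/n): " answer
            case "$answer" in
                y|Y|yes|YES) return 0 ;;
                n|N|no|NO)   return 1 ;;
                *) echo "---------------------" ;;
            esac
        done
    }
    # Usage, mirroring the https question in the removed script:
    if confirm "Enable https? (voice intercom requires it)"; then
        GBEnableHttps=true
    else
        GBEnableHttps=false
    fi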

+ 0 - 20
docker/gbDocker/reConfigGB.sh

@@ -1,20 +0,0 @@
-#!/bin/bash -e
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-cd $SCRIPT_DIR
-
-
-
-# 生成配置
-source $SCRIPT_DIR"/createConfig.sh"
-
-
-echo "
-                国标平台服务器信息
-                web端口=$GBWebPort
-                sip端口=$GBSipPort
-                访问地址=$GBExternalHost
-                国标域=$GBSipDomain
-                国标平台id=$GBSipID
-                国标密码=$GBSipPASSWD
-                "
-sudo docker restart hfy_gp

+ 0 - 3
docker/gbDocker/restart.sh

@@ -1,3 +0,0 @@
-#!/bin/bash -e
-echo "run: docker restart hfy_gp"
-docker restart hfy_gp

+ 0 - 1
docker/gbDocker/run.sh

@@ -1 +0,0 @@
-sudo docker run -p 29000:29000 -p 29001:29001 -v /data/gb:/data/gb --name hfy_gb -it hfy_gb:latest

+ 0 - 2
docker/installDocker.sh

@@ -1,2 +0,0 @@
-#!/bin/bash
-

+ 0 - 15
docker/mysqlDocker/Dockerfile

@@ -1,15 +0,0 @@
-FROM mysql:8.0
-MAINTAINER kindring
-EXPOSE 3306
-LABEL version="0.1" description="hfy 国标mysql服务器" by="hfy"
-# 设置docker 内的工作目录
-ENV WORK_PATH /data/mysql
-ENV AUTO_RUN_DIR /data/mysql/sh
-
-ENV FILE_0 init.sql
-ENV INSTALL_DATA_SHELL initDocker.sh
-RUN mkdir -p $WORK_PATH
-COPY ./$FILE_0 $WORK_PATH/
-COPY ./$INSTALL_DATA_SHELL $AUTO_RUN_DIR/
-RUN cp $WORK_PATH/$FILE_0 /docker-entrypoint-initdb.d
-#RUN #chmod a+x $AUTO_RUN_DIR/$INSTALL_DATA_SHEL
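
This Dockerfile copies init.sql into /docker-entrypoint-initdb.d, so the official mysql entrypoint executes it automatically on the first start of an empty data directory. A sketch of how the resulting image might be launched (the image name, root password, and volume path are illustrative; host port 6543 matches the removed config.cfg):

    docker build -t hfy_mysql:latest .
    docker run -d --name hfygbmysql \
        -p 6543:3306 \
        -e MYSQL_ROOT_PASSWORD=changeme \
        -v /data/mysql:/var/lib/mysql \
        hfy_mysql:latest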

+ 0 - 34
docker/mysqlDocker/base.cnf

@@ -1,34 +0,0 @@
-[mysqld]
-port=3306
-user=hfyroot
-# mysql 数据文件位置
-datadir=/data/mysql
-
-# pid
-pid-file=/var/run/mysqld/mysqld.pid
-socket=/var/run/mysqld/mysqld.sock
-default-time-zone = '+8:00'
-
-bind_address=0.0.0.0
-#只能用ip地址检查客户端的登陆
-skip_name_resolve=1
-
-
-#事务隔离级别,默认为可重复读,mysql默认可重复读级别(此级别下可能参数很多间隙锁,影响性能)
-transaction_isolation=READ-COMMITTED
-
-
-#最大连接数
-max_connections=400
- 
-#最大错误连接数
-max_connect_errors=1000
- 
-#TIMESTAMP如果没有显示声明NOT NULL,允许NULL值
-explicit_defaults_for_timestamp=true
- 
-#SQL数据包发送的大小,如果有BLOB对象建议修改成1G
-max_allowed_packet=10M
-
-
-

+ 0 - 1
docker/mysqlDocker/docker

@@ -1 +0,0 @@
-sh $AUTO_RUN_DIR/$INSTALL_DATA_SHELL exec -it hfygbmysql bash

+ 0 - 489
docker/mysqlDocker/init.sql

@@ -1,489 +0,0 @@
-/*
- Navicat Premium Data Transfer
-
- Source Server         : kindring.cn_6543
- Source Server Type    : MySQL
- Source Server Version : 80031
- Source Host           : kindring.cn:6543
- Source Schema         : gb_db
-
- Target Server Type    : MySQL
- Target Server Version : 80031
- File Encoding         : 65001
-
- Date: 03/04/2023 17:54:54
-*/
-
-SET NAMES utf8mb4;
-SET FOREIGN_KEY_CHECKS = 0;
-create DATABASE if not exists gb_db character set utf8;
-create user 'hfygb'@'%' identified by 'hfygb28181';
-GRANT all on gb_db.* TO 'hfygb'@'%';GRANT all on gb_db.* TO 'root'@'%';
-use gb_db;
--- ----------------------------
--- Table structure for ai_alarm
--- ----------------------------
-DROP TABLE IF EXISTS `ai_alarm`;
-CREATE TABLE `ai_alarm`  (
-                             `alarmId` int(0) NOT NULL AUTO_INCREMENT,
-                             `arithmetic` int(0) NULL DEFAULT NULL COMMENT '算法类型',
-                             `mediaPath` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '媒体资源地址',
-                             `alarmState` int(0) NOT NULL DEFAULT 1 COMMENT '告警状态 1 未读 2 忽略 3 已处理完成',
-                             `createTime` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '报警时间戳,unix',
-                             `operationTime` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '最后操作时间',
-                             `triggerType` int(0) NULL DEFAULT NULL COMMENT '触发类型 ',
-                             `rawMediaPath` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '原始图片地址',
-                             `deviceId` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '设备id',
-                             `channelId` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '通道id',
-                             `infoNum` int(0) NULL DEFAULT NULL COMMENT '子项数量',
-                             `firmware_version` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT '固件版本',
-                             `devTime` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '0' COMMENT '设备创建文件的时间戳',
-                             `signal` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '-110dbm' COMMENT '信号值',
-                             `temp_env` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT '环境温度',
-                             `temp_cpu` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT 'cpu温度',
-                             `ccid` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT '4g卡上网时的ccid',
-                             `zoom_rate` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT '对焦信息',
-                             `battery` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT '' COMMENT '电池电压',
-                             `alarmType` int(0) NULL DEFAULT 0 COMMENT '是否有匹配到库 0未匹配 1匹配到了',
-                             PRIMARY KEY (`alarmId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 8619 CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for ai_alarm_item
--- ----------------------------
-DROP TABLE IF EXISTS `ai_alarm_item`;
-CREATE TABLE `ai_alarm_item`  (
-                                  `itemId` int(0) NOT NULL AUTO_INCREMENT,
-                                  `alarmId` int(0) NOT NULL,
-                                  `score` float(255, 0) UNSIGNED ZEROFILL NULL DEFAULT NULL COMMENT '相似度',
-  `x1` float(255, 0) UNSIGNED ZEROFILL NULL DEFAULT NULL,
-  `x2` float(255, 0) UNSIGNED ZEROFILL NULL DEFAULT NULL,
-  `y1` float(255, 0) UNSIGNED ZEROFILL NULL DEFAULT NULL,
-  `y2` float(255, 0) UNSIGNED ZEROFILL NULL DEFAULT NULL,
-  `info` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '文字描述信息',
-  `trait` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '特征码',
-  `uid` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT 'uid',
-  PRIMARY KEY (`itemId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 8516 CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for ai_configs
--- ----------------------------
-DROP TABLE IF EXISTS `ai_configs`;
-CREATE TABLE `ai_configs`  (
-                               `configId` int(0) NOT NULL AUTO_INCREMENT COMMENT '配置id',
-                               `libraryId` int(0) NOT NULL COMMENT '图像库id',
-                               `arithmetic` int(0) NOT NULL DEFAULT 1 COMMENT '使用的算法 1-3 人脸,火情,车牌',
-                               `triggerType` int(0) NOT NULL DEFAULT 1 COMMENT '触发方式 1-3 无限制,白名单,黑名单',
-                               `refreshTime` int(0) NULL DEFAULT 60 COMMENT '刷新数据时间 0-n 单位s',
-                               `score` float(10, 0) NULL DEFAULT 70 COMMENT '触发阈值 0-n',
-  `resourcePath` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '资源更新地址',
-  `uploadUrl` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '媒体资源上传地址',
-  `pushUrl` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '平台收到报警回调推送地址',
-  `configName` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT 'ai配置名称',
-  PRIMARY KEY (`configId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 14 CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for ai_library
--- ----------------------------
-DROP TABLE IF EXISTS `ai_library`;
-CREATE TABLE `ai_library`  (
-                               `libraryId` int(0) NOT NULL AUTO_INCREMENT COMMENT '图像库id',
-                               `libraryName` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL COMMENT '图像库名称',
-                               `version` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '1' COMMENT '图像库 版本uuid',
-                               `arithmetic` int(0) NOT NULL COMMENT '适用的算法 1-3 人脸,火情,车牌,',
-                               `total` int(0) NULL DEFAULT 0 COMMENT '数据库的图像数据',
-                               PRIMARY KEY (`libraryId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 6 CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for dev_ai_config
--- ----------------------------
-DROP TABLE IF EXISTS `dev_ai_config`;
-CREATE TABLE `dev_ai_config`  (
-                                  `deviceId` varchar(30) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL COMMENT '设备id',
-                                  `configId` int(0) NOT NULL COMMENT '配置id',
-                                  `arithmetic` int(0) NULL DEFAULT NULL COMMENT '对应算法'
-) ENGINE = InnoDB CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for device
--- ----------------------------
-DROP TABLE IF EXISTS `device`;
-CREATE TABLE `device`  (
-                           `id` int(0) NOT NULL AUTO_INCREMENT,
-                           `deviceId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `manufacturer` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `model` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `firmware` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `transport` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `streamMode` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `online` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `registerTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `keepaliveTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `ip` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `port` int(0) NULL DEFAULT NULL,
-                           `expires` int(0) NULL DEFAULT NULL,
-                           `keepaliveIntervalTime` int(0) NULL DEFAULT NULL,
-                           `subscribeCycleForCatalog` int(0) NULL DEFAULT NULL,
-                           `hostAddress` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `charset` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `subscribeCycleForMobilePosition` int(0) NULL DEFAULT NULL,
-                           `mobilePositionSubmissionInterval` int(0) NULL DEFAULT 5,
-                           `subscribeCycleForAlarm` int(0) NULL DEFAULT NULL,
-                           `ssrcCheck` int(0) NULL DEFAULT 0,
-                           `asMessageChannel` int(0) NULL DEFAULT 0,
-                           `geoCoordSys` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `treeType` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                           `custom_name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `sdpIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `localIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `mediaServerId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                           `audioEncodePt` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL COMMENT '音频编码pt值表, 0,8 ',
-                           PRIMARY KEY (`id`) USING BTREE,
-                           UNIQUE INDEX `device_deviceId_uindex`(`deviceId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 4 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for device_alarm
--- ----------------------------
-DROP TABLE IF EXISTS `device_alarm`;
-CREATE TABLE `device_alarm`  (
-                                 `id` int(0) NOT NULL AUTO_INCREMENT,
-                                 `deviceId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `channelId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `alarmPriority` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `alarmMethod` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `alarmTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `alarmDescription` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `longitude` double NULL DEFAULT NULL,
-                                 `latitude` double NULL DEFAULT NULL,
-                                 `alarmType` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for device_channel
--- ----------------------------
-DROP TABLE IF EXISTS `device_channel`;
-CREATE TABLE `device_channel`  (
-                                   `id` int(0) NOT NULL AUTO_INCREMENT,
-                                   `channelId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                   `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `manufacture` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `model` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `owner` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `civilCode` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `block` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `address` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `parentId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `safetyWay` int(0) NULL DEFAULT NULL,
-                                   `registerWay` int(0) NULL DEFAULT NULL,
-                                   `certNum` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `certifiable` int(0) NULL DEFAULT NULL,
-                                   `errCode` int(0) NULL DEFAULT NULL,
-                                   `endTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `secrecy` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `ipAddress` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `port` int(0) NULL DEFAULT NULL,
-                                   `password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `PTZType` int(0) NULL DEFAULT NULL,
-                                   `status` int(0) NULL DEFAULT NULL,
-                                   `longitude` double NULL DEFAULT NULL,
-                                   `latitude` double NULL DEFAULT NULL,
-                                   `streamId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `deviceId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                   `parental` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `hasAudio` bit(1) NULL DEFAULT NULL,
-                                   `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                   `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                   `subCount` int(0) NULL DEFAULT 0,
-                                   `longitudeGcj02` double NULL DEFAULT NULL,
-                                   `latitudeGcj02` double NULL DEFAULT NULL,
-                                   `longitudeWgs84` double NULL DEFAULT NULL,
-                                   `latitudeWgs84` double NULL DEFAULT NULL,
-                                   `businessGroupId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   `gpsTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                   PRIMARY KEY (`id`) USING BTREE,
-                                   UNIQUE INDEX `device_channel_id_uindex`(`id`) USING BTREE,
-                                   UNIQUE INDEX `device_channel_pk`(`channelId`, `deviceId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 5 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for device_mobile_position
--- ----------------------------
-DROP TABLE IF EXISTS `device_mobile_position`;
-CREATE TABLE `device_mobile_position`  (
-                                           `id` int(0) NOT NULL AUTO_INCREMENT,
-                                           `deviceId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                           `channelId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                           `deviceName` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                           `time` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                           `longitude` double NOT NULL,
-                                           `latitude` double NOT NULL,
-                                           `altitude` double NULL DEFAULT NULL,
-                                           `speed` double NULL DEFAULT NULL,
-                                           `direction` double NULL DEFAULT NULL,
-                                           `reportSource` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                           `longitudeGcj02` double NULL DEFAULT NULL,
-                                           `latitudeGcj02` double NULL DEFAULT NULL,
-                                           `longitudeWgs84` double NULL DEFAULT NULL,
-                                           `latitudeWgs84` double NULL DEFAULT NULL,
-                                           `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                           PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for gb_stream
--- ----------------------------
-DROP TABLE IF EXISTS `gb_stream`;
-CREATE TABLE `gb_stream`  (
-                              `gbStreamId` int(0) NOT NULL AUTO_INCREMENT,
-                              `app` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `stream` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `gbId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                              `longitude` double NULL DEFAULT NULL,
-                              `latitude` double NULL DEFAULT NULL,
-                              `streamType` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                              `mediaServerId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                              `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                              PRIMARY KEY (`gbStreamId`) USING BTREE,
-                              UNIQUE INDEX `app`(`app`, `stream`) USING BTREE,
-                              UNIQUE INDEX `gbId`(`gbId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for lib_item
--- ----------------------------
-DROP TABLE IF EXISTS `lib_item`;
-CREATE TABLE `lib_item`  (
-                             `itemId` int(0) NOT NULL AUTO_INCREMENT,
-                             `libraryId` int(0) NULL DEFAULT NULL COMMENT '图像库id',
-                             `imageUrl` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL,
-                             `itemName` varchar(50) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '名称,用户名称或者车辆名称',
-                             `itemNo` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '标记号,用于车牌或者工号',
-                             `idCard` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '身份证号',
-                             `carNo` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '车牌号',
-                             `trait` varchar(255) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NULL DEFAULT NULL COMMENT '图像特征值',
-                             `itemType` int(0) NULL DEFAULT NULL COMMENT '数据类型 图片,文本',
-                             PRIMARY KEY (`itemId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 18 CHARACTER SET = utf8mb3 COLLATE = utf8mb3_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for log
--- ----------------------------
-DROP TABLE IF EXISTS `log`;
-CREATE TABLE `log`  (
-                        `id` int(0) NOT NULL AUTO_INCREMENT,
-                        `name` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `uri` varchar(200) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `address` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `result` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `timing` bigint(0) NOT NULL,
-                        `username` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                        PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1801 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for media_server
--- ----------------------------
-DROP TABLE IF EXISTS `media_server`;
-CREATE TABLE `media_server`  (
-                                 `id` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `ip` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `hookIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `sdpIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `streamIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `httpPort` int(0) NOT NULL,
-                                 `httpSSlPort` int(0) NOT NULL,
-                                 `rtmpPort` int(0) NOT NULL,
-                                 `rtmpSSlPort` int(0) NOT NULL,
-                                 `rtpProxyPort` int(0) NOT NULL,
-                                 `rtspPort` int(0) NOT NULL,
-                                 `rtspSSLPort` int(0) NOT NULL,
-                                 `autoConfig` int(0) NOT NULL,
-                                 `secret` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `rtpEnable` int(0) NOT NULL,
-                                 `rtpPortRange` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `recordAssistPort` int(0) NOT NULL,
-                                 `defaultServer` int(0) NOT NULL,
-                                 `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `hookAliveInterval` int(0) NOT NULL,
-                                 `externIP` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `externPort` int(0) NULL DEFAULT NULL,
-                                 PRIMARY KEY (`id`) USING BTREE,
-                                 UNIQUE INDEX `media_server_i`(`ip`, `httpPort`) USING BTREE
-) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for parent_platform
--- ----------------------------
-DROP TABLE IF EXISTS `parent_platform`;
-CREATE TABLE `parent_platform`  (
-                                    `id` int(0) NOT NULL AUTO_INCREMENT,
-                                    `enable` int(0) NULL DEFAULT NULL,
-                                    `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `serverGBId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                    `serverGBDomain` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `serverIP` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `serverPort` int(0) NULL DEFAULT NULL,
-                                    `deviceGBId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                    `deviceIp` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `devicePort` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `username` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `password` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `expires` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `keepTimeout` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `transport` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `characterSet` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `catalogId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                    `ptz` int(0) NULL DEFAULT NULL,
-                                    `rtcp` int(0) NULL DEFAULT NULL,
-                                    `asMessageChannel` int(0) NULL DEFAULT 0,
-                                    `status` bit(1) NULL DEFAULT NULL,
-                                    `startOfflinePush` int(0) NULL DEFAULT 0,
-                                    `administrativeDivision` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                    `catalogGroup` int(0) NULL DEFAULT 1,
-                                    `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                    `treeType` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                    PRIMARY KEY (`id`) USING BTREE,
-                                    UNIQUE INDEX `parent_platform_id_uindex`(`id`) USING BTREE,
-                                    UNIQUE INDEX `parent_platform_pk`(`serverGBId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for platform_catalog
--- ----------------------------
-DROP TABLE IF EXISTS `platform_catalog`;
-CREATE TABLE `platform_catalog`  (
-                                     `id` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                     `platformId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                     `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                     `parentId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                     `civilCode` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                     `businessGroupId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                     PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for platform_gb_channel
--- ----------------------------
-DROP TABLE IF EXISTS `platform_gb_channel`;
-CREATE TABLE `platform_gb_channel`  (
-                                        `id` int(0) NOT NULL AUTO_INCREMENT,
-                                        `platformId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                        `catalogId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                        `deviceChannelId` int(0) NOT NULL,
-                                        PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for platform_gb_stream
--- ----------------------------
-DROP TABLE IF EXISTS `platform_gb_stream`;
-CREATE TABLE `platform_gb_stream`  (
-                                       `id` int(0) NOT NULL AUTO_INCREMENT,
-                                       `platformId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                       `catalogId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                       `gbStreamId` int(0) NOT NULL,
-                                       PRIMARY KEY (`id`) USING BTREE,
-                                       UNIQUE INDEX `platform_gb_stream_pk`(`platformId`, `catalogId`, `gbStreamId`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for stream_proxy
--- ----------------------------
-DROP TABLE IF EXISTS `stream_proxy`;
-CREATE TABLE `stream_proxy`  (
-                                 `id` int(0) NOT NULL AUTO_INCREMENT,
-                                 `type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `app` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `stream` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `url` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `src_url` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `dst_url` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `timeout_ms` int(0) NULL DEFAULT NULL,
-                                 `ffmpeg_cmd_key` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `rtp_type` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `mediaServerId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `enable_audio` bit(1) NULL DEFAULT NULL,
-                                 `enable_mp4` bit(1) NULL DEFAULT NULL,
-                                 `enable` bit(1) NOT NULL,
-                                 `status` bit(1) NOT NULL,
-                                 `enable_remove_none_reader` bit(1) NOT NULL,
-                                 `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                 `name` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                 `enable_disable_none_reader` bit(1) NULL DEFAULT NULL,
-                                 PRIMARY KEY (`id`) USING BTREE,
-                                 UNIQUE INDEX `stream_proxy_pk`(`app`, `stream`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for stream_push
--- ----------------------------
-DROP TABLE IF EXISTS `stream_push`;
-CREATE TABLE `stream_push`  (
-                                `id` int(0) NOT NULL AUTO_INCREMENT,
-                                `app` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                `stream` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                `totalReaderCount` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `originType` int(0) NULL DEFAULT NULL,
-                                `originTypeStr` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `aliveSecond` int(0) NULL DEFAULT NULL,
-                                `mediaServerId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `serverId` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                                `pushTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `status` int(0) NULL DEFAULT NULL,
-                                `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                                `pushIng` int(0) NULL DEFAULT NULL,
-                                `self` int(0) NULL DEFAULT NULL,
-                                PRIMARY KEY (`id`) USING BTREE,
-                                UNIQUE INDEX `stream_push_pk`(`app`, `stream`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 153 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for user
--- ----------------------------
-DROP TABLE IF EXISTS `user`;
-CREATE TABLE `user`  (
-                         `id` int(0) NOT NULL AUTO_INCREMENT,
-                         `username` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                         `password` varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                         `roleId` int(0) NOT NULL,
-                         `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                         `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                         `pushKey` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NULL DEFAULT NULL,
-                         PRIMARY KEY (`id`) USING BTREE,
-                         UNIQUE INDEX `user_username_uindex`(`username`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 2 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
--- ----------------------------
--- Table structure for user_role
--- ----------------------------
-DROP TABLE IF EXISTS `user_role`;
-CREATE TABLE `user_role`  (
-                              `id` int(0) NOT NULL AUTO_INCREMENT,
-                              `name` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `authority` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `createTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              `updateTime` varchar(50) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci NOT NULL,
-                              PRIMARY KEY (`id`) USING BTREE
-) ENGINE = InnoDB AUTO_INCREMENT = 2 CHARACTER SET = utf8mb4 COLLATE = utf8mb4_general_ci ROW_FORMAT = Dynamic;
-
-SET FOREIGN_KEY_CHECKS = 1;
-
-INSERT INTO `user` VALUES (1, 'admin', '21232f297a57a5a743894a0e4a801fc3', 1, '2021-04-13 14:14:57', '2021-04-13 14:14:57', '3e80d1762a324d5b0ff636e0bd16f1e3');
-INSERT INTO `user_role` VALUES (1, 'admin', '0', '2021-04-13 14:14:57', '2021-04-13 14:14:57');
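Note: the seeded admin row above appears to store the account password as an MD5 hex digest. A quick sanity check (a sketch, assuming the default password is the literal string 'admin'):

    echo -n admin | md5sum
    # 21232f297a57a5a743894a0e4a801fc3  -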

+ 0 - 4
docker/mysqlDocker/initDocker.sh

@@ -1,4 +0,0 @@
-#! /bin/bash
- mysql -u root -p$MYSQL_ROOT_PASSWORD <<EOF
-source $WORK_PATH/$FILE_0;
-exit
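The deleted init script above opens a heredoc with <<EOF but never terminates it. A minimal sketch of what was presumably intended, reusing the same environment variables and adding the closing delimiter:

    #!/bin/bash
    mysql -u root -p"$MYSQL_ROOT_PASSWORD" <<EOF
    source $WORK_PATH/$FILE_0;
    exit
    EOF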

+ 0 - 150
docker/mysqlDocker/install.sh

@@ -1,150 +0,0 @@
-#!/bin/bash -e
-# hfy MySQL installer
-
-# Resolve the directory this script is located in
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-
-function  runInstallMysql(){
-  _useConfig=$1
-  _installMode=$2
-  w1=1
-  cd $SCRIPT_DIR
-  while [[ $w1 == 1 ]]
-  do
-    clear
-    echo "Starting MySQL installation, please enter the requested information";
-    _Host="0.0.0.0"
-    _Port=3306
-    _User="hfygb"
-    _Passwd="hfygb28181"
-    _dbName="gb_db"
-    if [ "$_useConfig" == "1" ]; then
-      echo "Installing with values from the config file"
-      mysqlHost=$mysql_host
-      mysqlPort=$mysql_port
-      mysqlUser=$mysql_user
-      mysqlPasswd=$mysql_passwd
-      mysqlDB=$mysql_db
-      w1=2
-    else
-      if [ $_installMode == 2 ]
-            then
-              read -p "Enter the connection address ($_Host): " mysqlHost
-            fi
-          mysqlHost=${mysqlHost:-$_Host}
-
-          read -p "Enter the external MySQL port ($_Port): " mysqlPort
-          mysqlPort=${mysqlPort:-$_Port}
-
-          read -p "Enter the account name ($_User): " mysqlUser
-          mysqlUser=${mysqlUser:-$_User}
-
-          read -p "Enter the password ($_Passwd): " mysqlPasswd
-          mysqlPasswd=${mysqlPasswd:-$_Passwd}
-
-          read -p "Enter the database name ($_dbName): " mysqlDB
-              mysqlDB=${mysqlDB:-$_dbName}
-
-          # Review the parameters
-          w2=1
-          while [[ $w2 == 1 ]]
-          do
-            echo "
-                    GB28181 platform database settings
-                    Host: $mysqlHost
-                    Port: $mysqlPort
-                    User: $mysqlUser
-                    Password: $mysqlPasswd
-                    Database: $mysqlDB
-                    "
-            echo "Confirm this information? y/n"
-            read _yn
-            if [[ $_yn == "y" ]] || [[ $_yn == "yes" ]] || [[ $_yn == "Y" ]] || [[ $_yn == "YES" ]] ; then
-              echo "Information confirmed";
-              w1=2;
-              break;
-            elif [[ $_yn == "n" ]] || [[ $_yn == "no" ]] || [[ $_yn == "N" ]] || [[ $_yn == "NO" ]] ; then
-              echo "Please re-enter"
-              w2=2;
-            else
-              echo "---------------------"
-            fi
-          done
-    fi
-  done
-  echo "Database information confirmed"
-  if [ $_installMode == 2 ];then
-    echo "Exporting credentials"
-    return 0
-  fi
-  #  Start updating the MySQL install configuration files
-#  initShellStr="#! /bin/bash
-#mysql -u $mysqlUser -p\$MYSQL_ROOT_PASSWORD <<EOF
-#source \$WORK_PATH/\$FILE_0;"
-#  echo "$initShellStr" > $SCRIPT_DIR"/initDocker.sh"
-
-
-  # Rewrite the values in the SQL init script
-  sed -i \
-  -e "s/^create DATABASE if not exists .*$/create DATABASE if not exists $mysqlDB character set utf8;/g" \
-  -e "s/^create user .*$/create user '$mysqlUser'@'%' identified by '$mysqlPasswd';/g" \
-  -e "s/^GRANT all on .*$/GRANT all on $mysqlDB.* TO '$mysqlUser'@'%';GRANT all on $mysqlDB.* TO 'root'@'%';/" \
-  -e "s/^use .*$/use $mysqlDB;/" \
-  $SCRIPT_DIR"/init.sql";
-#  return 0
-  if [ $? -ne 0 ]; then
-    echo "change data failed";exit
-  fi
-
-  # Build the image
-  docker build -t hfysql:latest .
-  if [ $? -ne 0 ]; then
-    echo "build docker failed";exit
-  fi
-  # Stop and remove any existing MySQL container
-  docker stop hfygbmysql
-  docker rm hfygbmysql
-  docker run -itd --name hfygbmysql -p "$mysqlPort":3306  -e MYSQL_ROOT_PASSWORD="hfy$mysqlPasswd" hfysql:latest
-  if [ $? -ne 0 ]; then
-    echo "run docker failed";
-  fi
-  #docker run -itd --name hfygbmysql -p 6543:3306  -e MYSQL_ROOT_PASSWORD=123456 hfysql:latest
-#  echo "sh \$AUTO_RUN_DIR/\$INSTALL_DATA_SHELL" > docker exec -it hfygbmysql bash
-}
-
-clear
-echo -e "\033[33m
-----------------------------------------------
---------HFY GB28181 Platform MySQL Database Installer---------
-----------------------------------------------
-
-\033[0m";
-echo -e "\033[35m Install MySQL through this installer? \033[0m";
-PS3="Enter a number to choose the installation mode:";
-selectOption_1="Automatically install and configure MySQL";
-selectOption_2="Use an existing MySQL database and enter its details manually";
-select=("$selectOption_1" "$selectOption_2")
-installMode=$1
-installMode=${installMode:-2}
-select fav in "${select[@]}";do
-  case $fav in
-    "$selectOption_1")
-                echo "Selected: $fav"
-                runInstallMysql "$installMode" 1
-                break;
-                ;;
-    "$selectOption_2")
-                  echo "Selected: $fav "
-#                todo: manually enter the mysql configuration
-                  runInstallMysql "$installMode" 2
-                  break;
-                  ;;
-    *)
-      echo -e "\033[37m Please enter 1 or 2 to choose how to install MySQL \033[0m"
-      ;;
-  esac
-done
-echo "Installation complete"
-#docker restart hfygbmysql
-cd ../
-
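The installer above patches docker/mysqlDocker/init.sql in place with sed before building the image. A quick, illustrative way (not part of the original scripts) to confirm the substitutions landed before running docker build:

    grep -nE "^(create DATABASE|create user|GRANT all on|use )" docker/mysqlDocker/init.sql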

+ 0 - 8
docker/mysqlDocker/installMysql.sh

@@ -1,8 +0,0 @@
-#!/bin/bash
-# Start the container from the image
-
-# Resolve the directory this script is located in
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-
-# Load the configuration file
-source $SCRIPT_DIR/config.sh

+ 0 - 35
docker/mysqlDocker/my.cnf

@@ -1,35 +0,0 @@
-
-[mysqld]
-port=3306
-user=hfygb
-# MySQL data directory
-datadir=/data/mysql
-
-# pid
-pid-file=/var/run/mysqld/mysqld.pid
-socket=/var/run/mysqld/mysqld.sock
-default-time-zone = '+8:00'
-
-bind_address=0.0.0.0
-# Only resolve clients by IP address (skip DNS lookups on login)
-skip_name_resolve=1
-
-
-# Transaction isolation level; MySQL defaults to REPEATABLE READ, which can create many gap locks and hurt performance
-transaction_isolation=READ-COMMITTED
-
-
-# Maximum number of connections
-max_connections=400
-
-# Maximum number of failed connection attempts
-max_connect_errors=1000
-
-# Allow NULL for TIMESTAMP columns not explicitly declared NOT NULL
-explicit_defaults_for_timestamp=true
-
-# Maximum size of a single SQL packet; consider raising to 1G when storing BLOBs
-max_allowed_packet=10M
-
-
-

+ 0 - 3
docker/mysqlDocker/restart.sh

@@ -1,3 +0,0 @@
-#!/bin/bash -e
-echo "run: docker restart hfygbmysql"
-docker restart hfygbmysql

+ 0 - 1
docker/mysqlDocker/run.sh

@@ -1 +0,0 @@
-docker run -itd --name hfyroot -p 3306:3306  -e MYSQL_ROOT_PASSWORD=hfyhfygb28181 hfy_gb:v1.1

+ 0 - 8
docker/reConfig.sh

@@ -1,8 +0,0 @@
-#!/bin/bash -e
-# Re-apply the configuration parameters and restart
-echo "Welcome to the hfy reconfiguration utility"
-# Regenerate the configuration
-echo "The configuration files will be updated based on the contents of config.cfg"
-source ./config.cfg
-source ./gbDocker/reConfigGB.sh
-

+ 0 - 13
docker/redisDocker/Dockerfile

@@ -1,13 +0,0 @@
-# Set the base image to redis
-FROM redis:latest
-
-MAINTAINER kindring
-VOLUME "/data"
-
-
-# Copy the redis.conf configuration file into the working directory
-COPY ./redis.conf /etc/redis/redis.conf
-CMD redis-server /etc/redis/redis.conf
-
-
-

+ 0 - 17
docker/redisDocker/docker-compse.yml

@@ -1,17 +0,0 @@
-version: '1.0'
-services:
-        redis:
-                hostname: redis
-                image: hfy_redis:1.0
-                container_name: redis
-                restart: unless-stopped
-                command: redis-server /etc/redis.conf
-                environment:
-                        - TZ=Asia/Shanghai
-                volumes:
-                        - /data/redis:/data/redis
-                        - ./logs:/logs
-                        - ./redis.conf:/etc/redis.conf
-                ports:
-                        - "6890:6889"
-
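A minimal way to bring this service up with Compose (a sketch: it assumes the hfy_redis:1.0 image has already been built, and note that the file in this directory is actually named docker-compse.yml, so it has to be passed explicitly):

    docker-compose -f docker-compse.yml up -d
    docker-compose -f docker-compse.yml logs -f redis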

+ 0 - 123
docker/redisDocker/install.sh

@@ -1,123 +0,0 @@
-#!/bin/bash -e
-#tar xvzf redis-7.0.7.tar.gz
-#sudo docker build -t hfy_redis:v7.0.7 .
-
-# Resolve the directory this script is located in
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-# Build and install Redis
-function installRedis(){
-  echo "Welcome to the Redis installer"
-  _useConfig=$1
-  _installMode=$2
-  w1=1
-  cd $SCRIPT_DIR
-
-  while [[ $w1 == 1 ]]
-  do
-    clear
-
-    _Host="0.0.0.0"
-    _Port=6543
-    _User="hfyredis"
-    _Passwd="hfyredis28181"
-    _dbName="gb_db"
-    if [ "$_useConfig" == "1" ]; then
-      echo "Installing with values from the config file"
-      redisHost=$redis_host
-      redisPort=$redis_port
-      redisPasswd=$redis_passwd
-      redisDB=$redis_db
-      w1=2
-    else
-      echo "Starting Redis installation, please enter the requested information"
-          if [ $_installMode == 2 ];then
-            read -p "Enter the connection address ($_Host): " redisHost
-          fi
-          redisHost=${redisHost:-$_Host}
-
-          read -p "Enter the external Redis port ($_Port): " redisPort
-          redisPort=${redisPort:-$_Port}
-
-      #    read -p "Enter the account name ($_User): " redisUser
-      #    redisUser=${redisUser:-$_User}
-
-          read -p "Enter the password ($_Passwd): " redisPasswd
-              redisPasswd=${redisPasswd:-$_Passwd}
-
-          read -p "Enter the database ($_dbName): " redisDB
-              redisDB=${redisDB:-$_dbName}
-        # Review the parameters
-          w2=1
-          while [[ $w2 == 1 ]]
-          do
-            echo "
-                    GB28181 platform Redis settings
-                    Host: $redisHost
-                    Port: $redisPort
-                    Password: $redisPasswd
-                    Database: $redisDB
-                    "
-            echo "Confirm this information? y/n"
-            read _yn
-            if [[ $_yn == "y" ]] || [[ $_yn == "yes" ]] || [[ $_yn == "Y" ]] || [[ $_yn == "YES" ]] ; then
-              echo "Information confirmed";
-              w1=2;
-              break;
-            elif [[ $_yn == "n" ]] || [[ $_yn == "no" ]] || [[ $_yn == "N" ]] || [[ $_yn == "NO" ]] ; then
-              echo "Please re-enter"
-              w2=2;
-            else
-              echo "---------------------"
-            fi
-          done
-    fi
-
-  done
-  echo "Database information confirmed"
-  if [ $_installMode == 2 ];then
-    echo "Exporting credentials"
-    return 0
-  fi
-  # Update the configuration file
-  sed -i \
-    -e "s/^port .*$/port $redisPort/g" \
-    -e "s/^requirepass .*$/requirepass $redisPasswd/g" \
-    $SCRIPT_DIR"/redis.conf";
-  docker build -t hfyredis:latest .
-  docker stop hfy_redis
-  docker rm hfy_redis
-  docker run -it -p "$redisPort":"$redisPort" -v /data/redis:/data/redis -v /data/redis/logs:/data/redis/logs --name hfy_redis -itd hfyredis:latest
-}
-
-clear
-echo -e "\033[33m
-----------------------------------------------
---------HFY GB28181 Platform Redis Database Installer---------
-----------------------------------------------
-\033[0m";
-echo -e "\033[35m Install Redis through this installer? \033[0m";
-PS3="Enter a number to choose the installation mode:";
-selectOption_1="Automatically install and configure Redis";
-selectOption_2="Use an existing Redis database and enter its details manually";
-select=("$selectOption_1" "$selectOption_2")
-installMode=$1
-installMode=${installMode:-2}
-select fav in "${select[@]}";do
-  case $fav in
-    "$selectOption_1")
-        echo "Selected: $fav"
-        installRedis "$installMode" 1
-        break;
-        ;;
-    "$selectOption_2")
-        echo "Selected: $fav "
-        installRedis "$installMode" 2
-        break;
-        ;;
-    *)
-      echo -e "\033[37m Please enter 1 or 2 to choose how to install Redis \033[0m"
-      ;;
-  esac
-done
-echo "Installation complete"
-cd ../
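Once the container started by the installer above is running, connectivity can be checked with redis-cli using the port and password entered at the prompts (an illustrative check, not part of the original installer):

    redis-cli -p "$redisPort" -a "$redisPasswd" ping    # expected reply: PONG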

BIN
docker/redisDocker/redis-7.0.7.tar.gz


+ 0 - 1254
docker/redisDocker/redis.conf

@@ -1,1254 +0,0 @@
-# Redis configuration file example.
-#
-# Note that in order to read the configuration file, Redis must be
-# started with the file path as first argument:
-#
-# ./redis-server /path/to/redis.conf
-
-# Note on units: when memory size is needed, it is possible to specify
-# it in the usual form of 1k 5GB 4M and so forth:
-#
-# 1k => 1000 bytes
-# 1kb => 1024 bytes
-# 1m => 1000000 bytes
-# 1mb => 1024*1024 bytes
-# 1g => 1000000000 bytes
-# 1gb => 1024*1024*1024 bytes
-#
-# units are case insensitive so 1GB 1Gb 1gB are all the same.
-
-################################## INCLUDES ###################################
-
-# Include one or more other config files here.  This is useful if you
-# have a standard template that goes to all Redis servers but also need
-# to customize a few per-server settings.  Include files can include
-# other files, so use this wisely.
-#
-# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
-# from admin or Redis Sentinel. Since Redis always uses the last processed
-# line as value of a configuration directive, you'd better put includes
-# at the beginning of this file to avoid overwriting config change at runtime.
-#
-# If instead you are interested in using includes to override configuration
-# options, it is better to use include as the last line.
-#
-# include /path/to/local.conf
-# include /path/to/other.conf
-
-################################## MODULES #####################################
-
-# Load modules at startup. If the server is not able to load modules
-# it will abort. It is possible to use multiple loadmodule directives.
-#
-# loadmodule /path/to/my_module.so
-# loadmodule /path/to/other_module.so
-
-################################## NETWORK #####################################
-
-# By default, if no "bind" configuration directive is specified, Redis listens
-# for connections from all the network interfaces available on the server.
-# It is possible to listen to just one or multiple selected interfaces using
-# the "bind" configuration directive, followed by one or more IP addresses.
-#
-# Examples:
-#
-# bind 192.168.1.100 10.0.0.1
-# bind 127.0.0.1 ::1
-#
-# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
-# internet, binding to all the interfaces is dangerous and will expose the
-# instance to everybody on the internet. So by default we uncomment the
-# following bind directive, that will force Redis to listen only into
-# the IPv4 loopback interface address (this means Redis will be able to
-# accept connections only from clients running into the same computer it
-# is running).
-#
-# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
-# JUST COMMENT THE FOLLOWING LINE.
-# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# bind 127.0.0.1 ::1
-
-# Protected mode is a layer of security protection, in order to avoid that
-# Redis instances left open on the internet are accessed and exploited.
-#
-# When protected mode is on and if:
-#
-# 1) The server is not binding explicitly to a set of addresses using the
-#    "bind" directive.
-# 2) No password is configured.
-#
-# The server only accepts connections from clients connecting from the
-# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
-# sockets.
-#
-# By default protected mode is enabled. You should disable it only if
-# you are sure you want clients from other hosts to connect to Redis
-# even if no authentication is configured, nor a specific set of interfaces
-# are explicitly listed using the "bind" directive.
-protected-mode no
-
-# Accept connections on the specified port, default is 6379 (IANA #815344).
-# If port 0 is specified Redis will not listen on a TCP socket.
-port 7654
-
-# TCP listen() backlog.
-#
-# In high requests-per-second environments you need an high backlog in order
-# to avoid slow clients connections issues. Note that the Linux kernel
-# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
-# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
-# in order to get the desired effect.
-tcp-backlog 511
-
-# Unix socket.
-#
-# Specify the path for the Unix socket that will be used to listen for
-# incoming connections. There is no default, so Redis will not listen
-# on a unix socket when not specified.
-#
-# unixsocket /var/run/redis/redis-server.sock
-# unixsocketperm 700
-
-# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 0
-
-# TCP keepalive.
-#
-# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
-# of communication. This is useful for two reasons:
-#
-# 1) Detect dead peers.
-# 2) Take the connection alive from the point of view of network
-#    equipment in the middle.
-#
-# On Linux, the specified value (in seconds) is the period used to send ACKs.
-# Note that to close the connection the double of the time is needed.
-# On other kernels the period depends on the kernel configuration.
-#
-# A reasonable value for this option is 300 seconds, which is the new
-# Redis default starting with Redis 3.2.1.
-tcp-keepalive 300
-
-################################# GENERAL #####################################
-
-# Whether to run as a daemon; inside the Docker image it must run in the foreground (non-daemonized)
-daemonize no
-
-# If you run Redis from upstart or systemd, Redis can interact with your
-# supervision tree. Options:
-#   supervised no      - no supervision interaction
-#   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
-#   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
-#   supervised auto    - detect upstart or systemd method based on
-#                        UPSTART_JOB or NOTIFY_SOCKET environment variables
-# Note: these supervision methods only signal "process is ready."
-#       They do not enable continuous liveness pings back to your supervisor.
-supervised no
-
-# If a pid file is specified, Redis writes it where specified at startup
-# and removes it at exit.
-#
-# When the server runs non daemonized, no pid file is created if none is
-# specified in the configuration. When the server is daemonized, the pid file
-# is used even if not specified, defaulting to "/var/run/redis.pid".
-#
-# Creating a pid file is best effort: if Redis is not able to create it
-# nothing bad happens, the server will start and run normally.
-pidfile /var/run/redis/redis-server.pid
-
-# Specify the server verbosity level.
-# This can be one of:
-# debug (a lot of information, useful for development/testing)
-# verbose (many rarely useful info, but not a mess like the debug level)
-# notice (moderately verbose, what you want in production probably)
-# warning (only very important / critical messages are logged)
-loglevel notice
-
-# Specify the log file name. Also the empty string can be used to force
-# Redis to log on the standard output. Note that if you use standard
-# output for logging but daemonize, logs will be sent to /dev/null
-logfile /data/redis/logs/redis-server.log
-
-# To enable logging to the system logger, just set 'syslog-enabled' to yes,
-# and optionally update the other syslog parameters to suit your needs.
-# syslog-enabled no
-
-# Specify the syslog identity.
-# syslog-ident redis
-
-# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
-# syslog-facility local0
-
-# Set the number of databases. The default database is DB 0, you can select
-# a different one on a per-connection basis using SELECT <dbid> where
-# dbid is a number between 0 and 'databases'-1
-databases 16
-
-# By default Redis shows an ASCII art logo only when started to log to the
-# standard output and if the standard output is a TTY. Basically this means
-# that normally a logo is displayed only in interactive sessions.
-#
-# However it is possible to force the pre-4.0 behavior and always show a
-# ASCII art logo in startup logs by setting the following option to yes.
-always-show-logo yes
-
-################################ SNAPSHOTTING  ################################
-#
-# Save the DB on disk:
-#
-#   save <seconds> <changes>
-#
-#   Will save the DB if both the given number of seconds and the given
-#   number of write operations against the DB occurred.
-#
-#   In the example below the behaviour will be to save:
-#   after 900 sec (15 min) if at least 1 key changed
-#   after 300 sec (5 min) if at least 10 keys changed
-#   after 60 sec if at least 10000 keys changed
-#
-#   Note: you can disable saving completely by commenting out all "save" lines.
-#
-#   It is also possible to remove all the previously configured save
-#   points by adding a save directive with a single empty string argument
-#   like in the following example:
-#
-#   save ""
-
-save 900 1
-save 300 10
-save 60 10000
-
-# By default Redis will stop accepting writes if RDB snapshots are enabled
-# (at least one save point) and the latest background save failed.
-# This will make the user aware (in a hard way) that data is not persisting
-# on disk properly, otherwise chances are that no one will notice and some
-# disaster will happen.
-#
-# If the background saving process will start working again Redis will
-# automatically allow writes again.
-#
-# However if you have setup your proper monitoring of the Redis server
-# and persistence, you may want to disable this feature so that Redis will
-# continue to work as usual even if there are problems with disk,
-# permissions, and so forth.
-stop-writes-on-bgsave-error yes
-
-# Compress string objects using LZF when dump .rdb databases?
-# For default that's set to 'yes' as it's almost always a win.
-# If you want to save some CPU in the saving child set it to 'no' but
-# the dataset will likely be bigger if you have compressible values or keys.
-rdbcompression yes
-
-# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
-# This makes the format more resistant to corruption but there is a performance
-# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
-# for maximum performances.
-#
-# RDB files created with checksum disabled have a checksum of zero that will
-# tell the loading code to skip the check.
-rdbchecksum yes
-
-# The filename where to dump the DB
-dbfilename dump.rdb
-
-# The working directory.
-#
-# The DB will be written inside this directory, with the filename specified
-# above using the 'dbfilename' configuration directive.
-#
-# The Append Only File will also be created inside this directory.
-#
-# Note that you must specify a directory here, not a file name.
-dir /data/redis
-
-################################# REPLICATION #################################
-
-# Master-Replica replication. Use replicaof to make a Redis instance a copy of
-# another Redis server. A few things to understand ASAP about Redis replication.
-#
-#   +------------------+      +---------------+
-#   |      Master      | ---> |    Replica    |
-#   | (receive writes) |      |  (exact copy) |
-#   +------------------+      +---------------+
-#
-# 1) Redis replication is asynchronous, but you can configure a master to
-#    stop accepting writes if it appears to be not connected with at least
-#    a given number of replicas.
-# 2) Redis replicas are able to perform a partial resynchronization with the
-#    master if the replication link is lost for a relatively small amount of
-#    time. You may want to configure the replication backlog size (see the next
-#    sections of this file) with a sensible value depending on your needs.
-# 3) Replication is automatic and does not need user intervention. After a
-#    network partition replicas automatically try to reconnect to masters
-#    and resynchronize with them.
-#
-# replicaof <masterip> <masterport>
-
-# If the master is password protected (using the "requirepass" configuration
-# directive below) it is possible to tell the replica to authenticate before
-# starting the replication synchronization process, otherwise the master will
-# refuse the replica request.
-#
-# masterauth <master-password>
-
-# When a replica loses its connection with the master, or when the replication
-# is still in progress, the replica can act in two different ways:
-#
-# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
-#    still reply to client requests, possibly with out of date data, or the
-#    data set may just be empty if this is the first synchronization.
-#
-# 2) if replica-serve-stale-data is set to 'no' the replica will reply with
-#    an error "SYNC with master in progress" to all the kind of commands
-#    but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG,
-#    SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB,
-#    COMMAND, POST, HOST: and LATENCY.
-#
-replica-serve-stale-data yes
-
-# You can configure a replica instance to accept writes or not. Writing against
-# a replica instance may be useful to store some ephemeral data (because data
-# written on a replica will be easily deleted after resync with the master) but
-# may also cause problems if clients are writing to it because of a
-# misconfiguration.
-#
-# Since Redis 2.6 by default replicas are read-only.
-#
-# Note: read only replicas are not designed to be exposed to untrusted clients
-# on the internet. It's just a protection layer against misuse of the instance.
-# Still a read only replica exports by default all the administrative commands
-# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
-# security of read only replicas using 'rename-command' to shadow all the
-# administrative / dangerous commands.
-replica-read-only yes
-
-# Replication SYNC strategy: disk or socket.
-#
-# -------------------------------------------------------
-# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
-# -------------------------------------------------------
-#
-# New replicas and reconnecting replicas that are not able to continue the replication
-# process just receiving differences, need to do what is called a "full
-# synchronization". An RDB file is transmitted from the master to the replicas.
-# The transmission can happen in two different ways:
-#
-# 1) Disk-backed: The Redis master creates a new process that writes the RDB
-#                 file on disk. Later the file is transferred by the parent
-#                 process to the replicas incrementally.
-# 2) Diskless: The Redis master creates a new process that directly writes the
-#              RDB file to replica sockets, without touching the disk at all.
-#
-# With disk-backed replication, while the RDB file is generated, more replicas
-# can be queued and served with the RDB file as soon as the current child producing
-# the RDB file finishes its work. With diskless replication instead once
-# the transfer starts, new replicas arriving will be queued and a new transfer
-# will start when the current one terminates.
-#
-# When diskless replication is used, the master waits a configurable amount of
-# time (in seconds) before starting the transfer in the hope that multiple replicas
-# will arrive and the transfer can be parallelized.
-#
-# With slow disks and fast (large bandwidth) networks, diskless replication
-# works better.
-repl-diskless-sync no
-
-# When diskless replication is enabled, it is possible to configure the delay
-# the server waits in order to spawn the child that transfers the RDB via socket
-# to the replicas.
-#
-# This is important since once the transfer starts, it is not possible to serve
-# new replicas arriving, that will be queued for the next RDB transfer, so the server
-# waits a delay in order to let more replicas arrive.
-#
-# The delay is specified in seconds, and by default is 5 seconds. To disable
-# it entirely just set it to 0 seconds and the transfer will start ASAP.
-repl-diskless-sync-delay 5
-
-# Replicas send PINGs to server in a predefined interval. It's possible to change
-# this interval with the repl_ping_replica_period option. The default value is 10
-# seconds.
-#
-# repl-ping-replica-period 10
-
-# The following option sets the replication timeout for:
-#
-# 1) Bulk transfer I/O during SYNC, from the point of view of replica.
-# 2) Master timeout from the point of view of replicas (data, pings).
-# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings).
-#
-# It is important to make sure that this value is greater than the value
-# specified for repl-ping-replica-period otherwise a timeout will be detected
-# every time there is low traffic between the master and the replica.
-#
-# repl-timeout 60
-
-# Disable TCP_NODELAY on the replica socket after SYNC?
-#
-# If you select "yes" Redis will use a smaller number of TCP packets and
-# less bandwidth to send data to replicas. But this can add a delay for
-# the data to appear on the replica side, up to 40 milliseconds with
-# Linux kernels using a default configuration.
-#
-# If you select "no" the delay for data to appear on the replica side will
-# be reduced but more bandwidth will be used for replication.
-#
-# By default we optimize for low latency, but in very high traffic conditions
-# or when the master and replicas are many hops away, turning this to "yes" may
-# be a good idea.
-repl-disable-tcp-nodelay no
-
-# Set the replication backlog size. The backlog is a buffer that accumulates
-# replica data when replicas are disconnected for some time, so that when a replica
-# wants to reconnect again, often a full resync is not needed, but a partial
-# resync is enough, just passing the portion of data the replica missed while
-# disconnected.
-#
-# The bigger the replication backlog, the longer the time the replica can be
-# disconnected and later be able to perform a partial resynchronization.
-#
-# The backlog is only allocated once there is at least a replica connected.
-#
-# repl-backlog-size 1mb
-
-# After a master has no longer connected replicas for some time, the backlog
-# will be freed. The following option configures the amount of seconds that
-# need to elapse, starting from the time the last replica disconnected, for
-# the backlog buffer to be freed.
-#
-# Note that replicas never free the backlog for timeout, since they may be
-# promoted to masters later, and should be able to correctly "partially
-# resynchronize" with the replicas: hence they should always accumulate backlog.
-#
-# A value of 0 means to never release the backlog.
-#
-# repl-backlog-ttl 3600
-
-# The replica priority is an integer number published by Redis in the INFO output.
-# It is used by Redis Sentinel in order to select a replica to promote into a
-# master if the master is no longer working correctly.
-#
-# A replica with a low priority number is considered better for promotion, so
-# for instance if there are three replicas with priority 10, 100, 25 Sentinel will
-# pick the one with priority 10, that is the lowest.
-#
-# However a special priority of 0 marks the replica as not able to perform the
-# role of master, so a replica with priority of 0 will never be selected by
-# Redis Sentinel for promotion.
-#
-# By default the priority is 100.
-replica-priority 100
-
-# It is possible for a master to stop accepting writes if there are less than
-# N replicas connected, having a lag less or equal than M seconds.
-#
-# The N replicas need to be in "online" state.
-#
-# The lag in seconds, that must be <= the specified value, is calculated from
-# the last ping received from the replica, that is usually sent every second.
-#
-# This option does not GUARANTEE that N replicas will accept the write, but
-# will limit the window of exposure for lost writes in case not enough replicas
-# are available, to the specified number of seconds.
-#
-# For example to require at least 3 replicas with a lag <= 10 seconds use:
-#
-# min-replicas-to-write 3
-# min-replicas-max-lag 10
-#
-# Setting one or the other to 0 disables the feature.
-#
-# By default min-replicas-to-write is set to 0 (feature disabled) and
-# min-replicas-max-lag is set to 10.
-
-# A Redis master is able to list the address and port of the attached
-# replicas in different ways. For example the "INFO replication" section
-# offers this information, which is used, among other tools, by
-# Redis Sentinel in order to discover replica instances.
-# Another place where this info is available is in the output of the
-# "ROLE" command of a master.
-#
-# The listed IP and address normally reported by a replica is obtained
-# in the following way:
-#
-#   IP: The address is auto detected by checking the peer address
-#   of the socket used by the replica to connect with the master.
-#
-#   Port: The port is communicated by the replica during the replication
-#   handshake, and is normally the port that the replica is using to
-#   listen for connections.
-#
-# However when port forwarding or Network Address Translation (NAT) is
-# used, the replica may be actually reachable via different IP and port
-# pairs. The following two options can be used by a replica in order to
-# report to its master a specific set of IP and port, so that both INFO
-# and ROLE will report those values.
-#
-# There is no need to use both the options if you need to override just
-# the port or the IP address.
-#
-# replica-announce-ip 5.5.5.5
-# replica-announce-port 1234
-
-################################## SECURITY ###################################
-
-# Require clients to issue AUTH <PASSWORD> before processing any other
-# commands.  This might be useful in environments in which you do not trust
-# others with access to the host running redis-server.
-#
-# This should stay commented out for backward compatibility and because most
-# people do not need auth (e.g. they run their own servers).
-#
-# Warning: since Redis is pretty fast an outside user can try up to
-# 150k passwords per second against a good box. This means that you should
-# use a very strong password otherwise it will be very easy to break.
-#
-requirepass hfyredis28181
-
-# Command renaming.
-#
-# It is possible to change the name of dangerous commands in a shared
-# environment. For instance the CONFIG command may be renamed into something
-# hard to guess so that it will still be available for internal-use tools
-# but not available for general clients.
-#
-# Example:
-#
-# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
-#
-# It is also possible to completely kill a command by renaming it into
-# an empty string:
-#
-# rename-command CONFIG ""
-#
-# Please note that changing the name of commands that are logged into the
-# AOF file or transmitted to replicas may cause problems.
-
-################################### CLIENTS ####################################
-
-# Set the max number of connected clients at the same time. By default
-# this limit is set to 10000 clients, however if the Redis server is not
-# able to configure the process file limit to allow for the specified limit
-# the max number of allowed clients is set to the current file limit
-# minus 32 (as Redis reserves a few file descriptors for internal uses).
-#
-# Once the limit is reached Redis will close all the new connections sending
-# an error 'max number of clients reached'.
-#
-# maxclients 10000
-
-############################## MEMORY MANAGEMENT ################################
-
-# Set a memory usage limit to the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys
-# according to the eviction policy selected (see maxmemory-policy).
-#
-# If Redis can't remove keys according to the policy, or if the policy is
-# set to 'noeviction', Redis will start to reply with errors to commands
-# that would use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to read-only commands like GET.
-#
-# This option is usually useful when using Redis as an LRU or LFU cache, or to
-# set a hard memory limit for an instance (using the 'noeviction' policy).
-#
-# WARNING: If you have replicas attached to an instance with maxmemory on,
-# the size of the output buffers needed to feed the replicas are subtracted
-# from the used memory count, so that network problems / resyncs will
-# not trigger a loop where keys are evicted, and in turn the output
-# buffer of replicas is full with DELs of keys evicted triggering the deletion
-# of more keys, and so forth until the database is completely emptied.
-#
-# In short... if you have replicas attached it is suggested that you set a lower
-# limit for maxmemory so that there is some free RAM on the system for replica
-# output buffers (but this is not needed if the policy is 'noeviction').
-#
-# maxmemory <bytes>
-
-# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
-# is reached. You can select among five behaviors:
-#
-# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
-# allkeys-lru -> Evict any key using approximated LRU.
-# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
-# allkeys-lfu -> Evict any key using approximated LFU.
-# volatile-random -> Remove a random key among the ones with an expire set.
-# allkeys-random -> Remove a random key, any key.
-# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
-# noeviction -> Don't evict anything, just return an error on write operations.
-#
-# LRU means Least Recently Used
-# LFU means Least Frequently Used
-#
-# Both LRU, LFU and volatile-ttl are implemented using approximated
-# randomized algorithms.
-#
-# Note: with any of the above policies, Redis will return an error on write
-#       operations, when there are no suitable keys for eviction.
-#
-#       At the date of writing these commands are: set setnx setex append
-#       incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
-#       sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
-#       zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
-#       getset mset msetnx exec sort
-#
-# The default is:
-#
-# maxmemory-policy noeviction
-
-# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
-# algorithms (in order to save memory), so you can tune it for speed or
-# accuracy. For default Redis will check five keys and pick the one that was
-# used less recently, you can change the sample size using the following
-# configuration directive.
-#
-# The default of 5 produces good enough results. 10 Approximates very closely
-# true LRU but costs more CPU. 3 is faster but not very accurate.
-#
-# maxmemory-samples 5
-
-# Starting from Redis 5, by default a replica will ignore its maxmemory setting
-# (unless it is promoted to master after a failover or manually). It means
-# that the eviction of keys will be just handled by the master, sending the
-# DEL commands to the replica as keys evict in the master side.
-#
-# This behavior ensures that masters and replicas stay consistent, and is usually
-# what you want, however if your replica is writable, or you want the replica to have
-# a different memory setting, and you are sure all the writes performed to the
-# replica are idempotent, then you may change this default (but be sure to understand
-# what you are doing).
-#
-# Note that since the replica by default does not evict, it may end using more
-# memory than the one set via maxmemory (there are certain buffers that may
-# be larger on the replica, or data structures may sometimes take more memory and so
-# forth). So make sure you monitor your replicas and make sure they have enough
-# memory to never hit a real out-of-memory condition before the master hits
-# the configured maxmemory setting.
-#
-# replica-ignore-maxmemory yes
-
-############################# LAZY FREEING ####################################
-
-# Redis has two primitives to delete keys. One is called DEL and is a blocking
-# deletion of the object. It means that the server stops processing new commands
-# in order to reclaim all the memory associated with an object in a synchronous
-# way. If the key deleted is associated with a small object, the time needed
-# in order to execute the DEL command is very small and comparable to most other
-# O(1) or O(log_N) commands in Redis. However if the key is associated with an
-# aggregated value containing millions of elements, the server can block for
-# a long time (even seconds) in order to complete the operation.
-#
-# For the above reasons Redis also offers non blocking deletion primitives
-# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
-# FLUSHDB commands, in order to reclaim memory in background. Those commands
-# are executed in constant time. Another thread will incrementally free the
-# object in the background as fast as possible.
-#
-# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
-# It's up to the design of the application to understand when it is a good
-# idea to use one or the other. However the Redis server sometimes has to
-# delete keys or flush the whole database as a side effect of other operations.
-# Specifically Redis deletes objects independently of a user call in the
-# following scenarios:
-#
-# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
-#    in order to make room for new data, without going over the specified
-#    memory limit.
-# 2) Because of expire: when a key with an associated time to live (see the
-#    EXPIRE command) must be deleted from memory.
-# 3) Because of a side effect of a command that stores data on a key that may
-#    already exist. For example the RENAME command may delete the old key
-#    content when it is replaced with another one. Similarly SUNIONSTORE
-#    or SORT with STORE option may delete existing keys. The SET command
-#    itself removes any old content of the specified key in order to replace
-#    it with the specified string.
-# 4) During replication, when a replica performs a full resynchronization with
-#    its master, the content of the whole database is removed in order to
-#    load the RDB file just transferred.
-#
-# In all the above cases the default is to delete objects in a blocking way,
-# like if DEL was called. However you can configure each case specifically
-# in order to instead release memory in a non-blocking way like if UNLINK
-# was called, using the following configuration directives:
-
-lazyfree-lazy-eviction no
-lazyfree-lazy-expire no
-lazyfree-lazy-server-del no
-replica-lazy-flush no
-
-############################## APPEND ONLY MODE ###############################
-
-# By default Redis asynchronously dumps the dataset on disk. This mode is
-# good enough in many applications, but an issue with the Redis process or
-# a power outage may result into a few minutes of writes lost (depending on
-# the configured save points).
-#
-# The Append Only File is an alternative persistence mode that provides
-# much better durability. For instance using the default data fsync policy
-# (see later in the config file) Redis can lose just one second of writes in a
-# dramatic event like a server power outage, or a single write if something
-# wrong with the Redis process itself happens, but the operating system is
-# still running correctly.
-#
-# AOF and RDB persistence can be enabled at the same time without problems.
-# If the AOF is enabled on startup Redis will load the AOF, that is the file
-# with the better durability guarantees.
-#
-# Please check http://redis.io/topics/persistence for more information.
-
-appendonly no
-
-# The name of the append only file (default: "appendonly.aof")
-
-appendfilename "appendonly.aof"
-
-# The fsync() call tells the Operating System to actually write data on disk
-# instead of waiting for more data in the output buffer. Some OS will really flush
-# data on disk, some other OS will just try to do it ASAP.
-#
-# Redis supports three different modes:
-#
-# no: don't fsync, just let the OS flush the data when it wants. Faster.
-# always: fsync after every write to the append only log. Slow, Safest.
-# everysec: fsync only one time every second. Compromise.
-#
-# The default is "everysec", as that's usually the right compromise between
-# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will let the operating system flush the output buffer when
-# it wants, for better performances (but if you can live with the idea of
-# some data loss consider the default persistence mode that's snapshotting),
-# or on the contrary, use "always" that's very slow but a bit safer than
-# everysec.
-#
-# More details please check the following article:
-# http://antirez.com/post/redis-persistence-demystified.html
-#
-# If unsure, use "everysec".
-
-# appendfsync always
-appendfsync everysec
-# appendfsync no
-
-# When the AOF fsync policy is set to always or everysec, and a background
-# saving process (a background save or AOF log background rewriting) is
-# performing a lot of I/O against the disk, in some Linux configurations
-# Redis may block too long on the fsync() call. Note that there is no fix for
-# this currently, as even performing fsync in a different thread will block
-# our synchronous write(2) call.
-#
-# In order to mitigate this problem it's possible to use the following option
-# that will prevent fsync() from being called in the main process while a
-# BGSAVE or BGREWRITEAOF is in progress.
-#
-# This means that while another child is saving, the durability of Redis is
-# the same as "appendfsync none". In practical terms, this means that it is
-# possible to lose up to 30 seconds of log in the worst scenario (with the
-# default Linux settings).
-#
-# If you have latency problems turn this to "yes". Otherwise leave it as
-# "no" that is the safest pick from the point of view of durability.
-
-no-appendfsync-on-rewrite no
-
-# Automatic rewrite of the append only file.
-# Redis is able to automatically rewrite the log file implicitly calling
-# BGREWRITEAOF when the AOF log size grows by the specified percentage.
-#
-# This is how it works: Redis remembers the size of the AOF file after the
-# latest rewrite (if no rewrite has happened since the restart, the size of
-# the AOF at startup is used).
-#
-# This base size is compared to the current size. If the current size is
-# bigger than the specified percentage, the rewrite is triggered. Also
-# you need to specify a minimal size for the AOF file to be rewritten, this
-# is useful to avoid rewriting the AOF file even if the percentage increase
-# is reached but it is still pretty small.
-#
-# Specify a percentage of zero in order to disable the automatic AOF
-# rewrite feature.
-
-auto-aof-rewrite-percentage 100
-auto-aof-rewrite-min-size 64mb
-
-# An AOF file may be found to be truncated at the end during the Redis
-# startup process, when the AOF data gets loaded back into memory.
-# This may happen when the system where Redis is running
-# crashes, especially when an ext4 filesystem is mounted without the
-# data=ordered option (however this can't happen when Redis itself
-# crashes or aborts but the operating system still works correctly).
-#
-# Redis can either exit with an error when this happens, or load as much
-# data as possible (the default now) and start if the AOF file is found
-# to be truncated at the end. The following option controls this behavior.
-#
-# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
-# the Redis server starts emitting a log to inform the user of the event.
-# Otherwise if the option is set to no, the server aborts with an error
-# and refuses to start. When the option is set to no, the user requires
-# to fix the AOF file using the "redis-check-aof" utility before to restart
-# the server.
-#
-# Note that if the AOF file will be found to be corrupted in the middle
-# the server will still exit with an error. This option only applies when
-# Redis will try to read more data from the AOF file but not enough bytes
-# will be found.
-aof-load-truncated yes
-
-# When rewriting the AOF file, Redis is able to use an RDB preamble in the
-# AOF file for faster rewrites and recoveries. When this option is turned
-# on the rewritten AOF file is composed of two different stanzas:
-#
-#   [RDB file][AOF tail]
-#
-# When loading Redis recognizes that the AOF file starts with the "REDIS"
-# string and loads the prefixed RDB file, and continues loading the AOF
-# tail.
-aof-use-rdb-preamble yes
-
-################################ LUA SCRIPTING  ###############################
-
-# Max execution time of a Lua script in milliseconds.
-#
-# If the maximum execution time is reached Redis will log that a script is
-# still in execution after the maximum allowed time and will start to
-# reply to queries with an error.
-#
-# When a long running script exceeds the maximum execution time only the
-# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
-# used to stop a script that did not yet called write commands. The second
-# is the only way to shut down the server in the case a write command was
-# already issued by the script but the user doesn't want to wait for the natural
-# termination of the script.
-#
-# Set it to 0 or a negative value for unlimited execution without warnings.
-lua-time-limit 5000
-
-################################ REDIS CLUSTER  ###############################
-
-# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
-# started as cluster nodes can. In order to start a Redis instance as a
-# cluster node enable the cluster support uncommenting the following:
-#
-# cluster-enabled yes
-
-# Every cluster node has a cluster configuration file. This file is not
-# intended to be edited by hand. It is created and updated by Redis nodes.
-# Every Redis Cluster node requires a different cluster configuration file.
-# Make sure that instances running in the same system do not have
-# overlapping cluster configuration file names.
-#
-# cluster-config-file nodes-6379.conf
-
-# Cluster node timeout is the amount of milliseconds a node must be unreachable
-# for it to be considered in failure state.
-# Most other internal time limits are multiple of the node timeout.
-#
-# cluster-node-timeout 15000
-
-# A replica of a failing master will avoid to start a failover if its data
-# looks too old.
-#
-# There is no simple way for a replica to actually have an exact measure of
-# its "data age", so the following two checks are performed:
-#
-# 1) If there are multiple replicas able to failover, they exchange messages
-#    in order to try to give an advantage to the replica with the best
-#    replication offset (more data from the master processed).
-#    Replicas will try to get their rank by offset, and apply to the start
-#    of the failover a delay proportional to their rank.
-#
-# 2) Every single replica computes the time of the last interaction with
-#    its master. This can be the last ping or command received (if the master
-#    is still in the "connected" state), or the time that elapsed since the
-#    disconnection with the master (if the replication link is currently down).
-#    If the last interaction is too old, the replica will not try to failover
-#    at all.
-#
-# The point "2" can be tuned by user. Specifically a replica will not perform
-# the failover if, since the last interaction with the master, the time
-# elapsed is greater than:
-#
-#   (node-timeout * replica-validity-factor) + repl-ping-replica-period
-#
-# So for example if node-timeout is 30 seconds, and the replica-validity-factor
-# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the
-# replica will not try to failover if it was not able to talk with the master
-# for longer than 310 seconds.
-#
-# A large replica-validity-factor may allow replicas with too old data to failover
-# a master, while a too small value may prevent the cluster from being able to
-# elect a replica at all.
-#
-# For maximum availability, it is possible to set the replica-validity-factor
-# to a value of 0, which means, that replicas will always try to failover the
-# master regardless of the last time they interacted with the master.
-# (However they'll always try to apply a delay proportional to their
-# offset rank).
-#
-# Zero is the only value able to guarantee that when all the partitions heal
-# the cluster will always be able to continue.
-#
-# cluster-replica-validity-factor 10
-
-# Cluster replicas are able to migrate to orphaned masters, that are masters
-# that are left without working replicas. This improves the cluster ability
-# to resist to failures as otherwise an orphaned master can't be failed over
-# in case of failure if it has no working replicas.
-#
-# Replicas migrate to orphaned masters only if there are still at least a
-# given number of other working replicas for their old master. This number
-# is the "migration barrier". A migration barrier of 1 means that a replica
-# will migrate only if there is at least 1 other working replica for its master
-# and so forth. It usually reflects the number of replicas you want for every
-# master in your cluster.
-#
-# Default is 1 (replicas migrate only if their masters remain with at least
-# one replica). To disable migration just set it to a very large value.
-# A value of 0 can be set but is useful only for debugging and dangerous
-# in production.
-#
-# cluster-migration-barrier 1
-
-# By default Redis Cluster nodes stop accepting queries if they detect there
-# is at least an hash slot uncovered (no available node is serving it).
-# This way if the cluster is partially down (for example a range of hash slots
-# are no longer covered) all the cluster becomes, eventually, unavailable.
-# It automatically returns available as soon as all the slots are covered again.
-#
-# However sometimes you want the subset of the cluster which is working,
-# to continue to accept queries for the part of the key space that is still
-# covered. In order to do so, just set the cluster-require-full-coverage
-# option to no.
-#
-# cluster-require-full-coverage yes
-
-# This option, when set to yes, prevents replicas from trying to failover its
-# master during master failures. However the master can still perform a
-# manual failover, if forced to do so.
-#
-# This is useful in different scenarios, especially in the case of multiple
-# data center operations, where we want one side to never be promoted if not
-# in the case of a total DC failure.
-#
-# cluster-replica-no-failover no
-
-# In order to setup your cluster make sure to read the documentation
-# available at http://redis.io web site.
-
-########################## CLUSTER DOCKER/NAT support  ########################
-
-# In certain deployments, Redis Cluster nodes address discovery fails, because
-# addresses are NAT-ted or because ports are forwarded (the typical case is
-# Docker and other containers).
-#
-# In order to make Redis Cluster working in such environments, a static
-# configuration where each node knows its public address is needed. The
-# following two options are used for this scope, and are:
-#
-# * cluster-announce-ip
-# * cluster-announce-port
-# * cluster-announce-bus-port
-#
-# Each instructs the node about its address, client port, and cluster message
-# bus port. The information is then published in the header of the bus packets
-# so that other nodes will be able to correctly map the address of the node
-# publishing the information.
-#
-# If the above options are not used, the normal Redis Cluster auto-detection
-# will be used instead.
-#
-# Note that when remapped, the bus port may not be at the fixed offset of
-# clients port + 10000, so you can specify any port and bus-port depending
-# on how they get remapped. If the bus-port is not set, a fixed offset of
-# 10000 will be used as usual.
-#
-# Example:
-#
-# cluster-announce-ip 10.1.1.5
-# cluster-announce-port 6379
-# cluster-announce-bus-port 6380
-
-################################## SLOW LOG ###################################
-
-# The Redis Slow Log is a system to log queries that exceeded a specified
-# execution time. The execution time does not include the I/O operations
-# like talking with the client, sending the reply and so forth,
-# but just the time needed to actually execute the command (this is the only
-# stage of command execution where the thread is blocked and can not serve
-# other requests in the meantime).
-#
-# You can configure the slow log with two parameters: one tells Redis
-# what is the execution time, in microseconds, to exceed in order for the
-# command to get logged, and the other parameter is the length of the
-# slow log. When a new command is logged the oldest one is removed from the
-# queue of logged commands.
-
-# The following time is expressed in microseconds, so 1000000 is equivalent
-# to one second. Note that a negative number disables the slow log, while
-# a value of zero forces the logging of every command.
-slowlog-log-slower-than 10000
-
-# There is no limit to this length. Just be aware that it will consume memory.
-# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 128
-
-################################ LATENCY MONITOR ##############################
-
-# The Redis latency monitoring subsystem samples different operations
-# at runtime in order to collect data related to possible sources of
-# latency of a Redis instance.
-#
-# Via the LATENCY command this information is available to the user that can
-# print graphs and obtain reports.
-#
-# The system only logs operations that were performed in a time equal to or
-# greater than the amount of milliseconds specified via the
-# latency-monitor-threshold configuration directive. When its value is set
-# to zero, the latency monitor is turned off.
-#
-# By default latency monitoring is disabled since it is mostly not needed
-# if you don't have latency issues, and collecting data has a performance
-# impact that, while very small, can be measured under heavy load. Latency
-# monitoring can easily be enabled at runtime using the command
-# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
-latency-monitor-threshold 0
-
-############################# EVENT NOTIFICATION ##############################
-
-# Redis can notify Pub/Sub clients about events happening in the key space.
-# This feature is documented at http://redis.io/topics/notifications
-#
-# For instance if keyspace events notification is enabled, and a client
-# performs a DEL operation on key "foo" stored in the Database 0, two
-# messages will be published via Pub/Sub:
-#
-# PUBLISH __keyspace@0__:foo del
-# PUBLISH __keyevent@0__:del foo
-#
-# It is possible to select the events that Redis will notify among a set
-# of classes. Every class is identified by a single character:
-#
-#  K     Keyspace events, published with __keyspace@<db>__ prefix.
-#  E     Keyevent events, published with __keyevent@<db>__ prefix.
-#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
-#  $     String commands
-#  l     List commands
-#  s     Set commands
-#  h     Hash commands
-#  z     Sorted set commands
-#  x     Expired events (events generated every time a key expires)
-#  e     Evicted events (events generated when a key is evicted for maxmemory)
-#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
-#
-#  The "notify-keyspace-events" takes as argument a string that is composed
-#  of zero or multiple characters. The empty string means that notifications
-#  are disabled.
-#
-#  Example: to enable list and generic events, from the point of view of the
-#           event name, use:
-#
-#  notify-keyspace-events Elg
-#
-#  Example 2: to get the stream of the expired keys subscribing to channel
-#             name __keyevent@0__:expired use:
-#
-#  notify-keyspace-events Ex
-#
-#  By default all notifications are disabled because most users don't need
-#  this feature and the feature has some overhead. Note that if you don't
-#  specify at least one of K or E, no events will be delivered.
-notify-keyspace-events ""
-
-############################### ADVANCED CONFIG ###############################
-
-# Hashes are encoded using a memory efficient data structure when they have a
-# small number of entries, and the biggest entry does not exceed a given
-# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512
-hash-max-ziplist-value 64
-
-# Lists are also encoded in a special way to save a lot of space.
-# The number of entries allowed per internal list node can be specified
-# as a fixed maximum size or a maximum number of elements.
-# For a fixed maximum size, use -5 through -1, meaning:
-# -5: max size: 64 Kb  <-- not recommended for normal workloads
-# -4: max size: 32 Kb  <-- not recommended
-# -3: max size: 16 Kb  <-- probably not recommended
-# -2: max size: 8 Kb   <-- good
-# -1: max size: 4 Kb   <-- good
-# Positive numbers mean store up to _exactly_ that number of elements
-# per list node.
-# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
-# but if your use case is unique, adjust the settings as necessary.
-list-max-ziplist-size -2
-
-# Lists may also be compressed.
-# Compress depth is the number of quicklist ziplist nodes from *each* side of
-# the list to *exclude* from compression.  The head and tail of the list
-# are always uncompressed for fast push/pop operations.  Settings are:
-# 0: disable all list compression
-# 1: depth 1 means "don't start compressing until after 1 node into the list,
-#    going from either the head or tail"
-#    So: [head]->node->node->...->node->[tail]
-#    [head], [tail] will always be uncompressed; inner nodes will compress.
-# 2: [head]->[next]->node->node->...->node->[prev]->[tail]
-#    2 here means: don't compress head or head->next or tail->prev or tail,
-#    but compress all nodes between them.
-# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
-# etc.
-list-compress-depth 0
-
-# Sets have a special encoding in just one case: when a set is composed
-# of just strings that happen to be integers in radix 10 in the range
-# of 64 bit signed integers.
-# The following configuration setting sets the limit in the size of the
-# set in order to use this special memory saving encoding.
-set-max-intset-entries 512
-
-# Similarly to hashes and lists, sorted sets are also specially encoded in
-# order to save a lot of space. This encoding is only used when the length and
-# elements of a sorted set are below the following limits:
-zset-max-ziplist-entries 128
-zset-max-ziplist-value 64
-
-# HyperLogLog sparse representation bytes limit. The limit includes the
-# 16-byte header. When a HyperLogLog using the sparse representation crosses
-# this limit, it is converted into the dense representation.
-#
-# A value greater than 16000 is totally useless, since at that point the
-# dense representation is more memory efficient.
-#
-# The suggested value is ~ 3000 in order to have the benefits of
-# the space efficient encoding without slowing down too much PFADD,
-# which is O(N) with the sparse encoding. The value can be raised to
-# ~ 10000 when CPU is not a concern, but space is, and the data set is
-# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
-hll-sparse-max-bytes 3000
-
-# Streams macro node max size / items. The stream data structure is a radix
-# tree of big nodes that encode multiple items inside. Using this configuration
-# it is possible to configure how big a single node can be in bytes, and the
-# maximum number of items it may contain before switching to a new node when
-# appending new stream entries. If any of the following settings are set to
-# zero, the limit is ignored, so for instance it is possible to set just a
-# max entries limit by setting max-bytes to 0 and max-entries to the desired
-# value.
-stream-node-max-bytes 4096
-stream-node-max-entries 100
-
-# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
-# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation Redis uses (see dict.c)
-# performs a lazy rehashing: the more operations you run against a hash table
-# that is rehashing, the more rehashing "steps" are performed, so if the
-# server is idle the rehashing is never complete and some more memory is used
-# by the hash table.
-#
-# The default is to use this millisecond 10 times every second in order to
-# actively rehash the main dictionaries, freeing memory when possible.
-#
-# If unsure:
-# use "activerehashing no" if you have hard latency requirements and it is
-# not a good thing in your environment that Redis can reply from time to time
-# to queries with 2 milliseconds delay.
-#
-# use "activerehashing yes" if you don't have such hard requirements but
-# want to free memory asap when possible.
-activerehashing yes
-
-# The client output buffer limits can be used to force disconnection of clients
-# that are not reading data from the server fast enough for some reason (a
-# common reason is that a Pub/Sub client can't consume messages as fast as the
-# publisher can produce them).
-#
-# The limit can be set differently for the three different classes of clients:
-#
-# normal -> normal clients including MONITOR clients
-# replica  -> replica clients
-# pubsub -> clients subscribed to at least one pubsub channel or pattern
-#
-# The syntax of every client-output-buffer-limit directive is the following:
-#
-# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
-#
-# A client is immediately disconnected once the hard limit is reached, or if
-# the soft limit is reached and remains reached for the specified number of
-# seconds (continuously).
-# So for instance if the hard limit is 32 megabytes and the soft limit is
-# 16 megabytes / 10 seconds, the client will get disconnected immediately
-# if the size of the output buffers reach 32 megabytes, but will also get
-# disconnected if the client reaches 16 megabytes and continuously overcomes
-# the limit for 10 seconds.
-#
-# By default normal clients are not limited because they don't receive data
-# without asking (in a push way), but just after a request, so only
-# asynchronous clients may create a scenario where data is requested faster
-# than it can be read.
-#
-# Instead there is a default limit for pubsub and replica clients, since
-# subscribers and replicas receive data in a push fashion.
-#
-# Both the hard or the soft limit can be disabled by setting them to zero.
-client-output-buffer-limit normal 0 0 0
-client-output-buffer-limit replica 256mb 64mb 60
-client-output-buffer-limit pubsub 32mb 8mb 60
-
-# Client query buffers accumulate new commands. They are limited to a fixed
-# amount by default in order to avoid that a protocol desynchronization (for
-# instance due to a bug in the client) will lead to unbounded memory usage in
-# the query buffer. However you can configure it here if you have very special
-# needs, such as huge multi/exec requests or the like.
-#
-# client-query-buffer-limit 1gb
-
-# In the Redis protocol, bulk requests, that is, elements representing single
-# strings, are normally limited to 512 mb. However you can change this limit
-# here.
-#
-# proto-max-bulk-len 512mb
-
-# Redis calls an internal function to perform many background tasks, like
-# closing connections of clients in timeout, purging expired keys that are
-# never requested, and so forth.
-#
-# Not all tasks are performed with the same frequency, but Redis checks for
-# tasks to perform according to the specified "hz" value.
-#
-# By default "hz" is set to 10. Raising the value will use more CPU when
-# Redis is idle, but at the same time will make Redis more responsive when
-# there are many keys expiring at the same time, and timeouts may be
-# handled with more precision.
-#
-# The range is between 1 and 500, however a value over 100 is usually not
-# a good idea. Most users should use the default of 10 and raise this up to
-# 100 only in environments where very low latency is required.
-hz 10
-
-# Normally it is useful to have an HZ value which is proportional to the
-# number of clients connected. This is useful in order, for instance, to
-# avoid processing too many clients for each background task invocation
-# in order to avoid latency spikes.
-#
-# Since the default HZ value is conservatively set to 10, Redis
-# offers, and enables by default, the ability to use an adaptive HZ value
-# which will temporarily rise when there are many connected clients.
-#
-# When dynamic HZ is enabled, the actual configured HZ will be used as
-# a baseline, but multiples of the configured HZ value will actually be
-# used as needed once more clients are connected. In this way an idle
-# instance will use very little CPU time while a busy instance will be
-# more responsive.
-dynamic-hz yes
-
-# When a child rewrites the AOF file, if the following option is enabled
-# the file will be fsync-ed every 32 MB of data generated. This is useful
-# in order to commit the file to the disk more incrementally and avoid
-# big latency spikes.
-aof-rewrite-incremental-fsync yes
-
-# When Redis saves an RDB file, if the following option is enabled
-# the file will be fsync-ed every 32 MB of data generated. This is useful
-# in order to commit the file to the disk more incrementally and avoid
-# big latency spikes.
-rdb-save-incremental-fsync yes
-
-
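
Since this commit removes the bundled redis.conf, the few tuned values it documented (slow log thresholds, latency monitoring, keyspace notifications) can still be applied to a running instance at runtime. A minimal sketch, assuming the container is named hfy_redis as in the restart script below; if requirepass is enabled, add -a <password> to each redis-cli call:

```bash
#!/bin/bash
# Apply the settings discussed in the removed redis.conf at runtime.
docker exec hfy_redis redis-cli CONFIG SET slowlog-log-slower-than 10000
docker exec hfy_redis redis-cli CONFIG SET slowlog-max-len 128
docker exec hfy_redis redis-cli CONFIG SET latency-monitor-threshold 0
docker exec hfy_redis redis-cli CONFIG SET notify-keyspace-events ""
# Inspect what the slow log has captured so far.
docker exec hfy_redis redis-cli SLOWLOG GET 10
```

Note that CONFIG SET changes are not persisted across restarts unless CONFIG REWRITE is run against a writable config file.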

+ 0 - 3
docker/redisDocker/restart.sh

@@ -1,3 +0,0 @@
-#!/bin/bash -e
-echo "run: docker restart hfy_redis"
-docker restart hfy_redis

+ 0 - 1
docker/redisDocker/run_redis.sh

@@ -1 +0,0 @@
-sudo docker run -p 6890:6889 -v /data/redis:/data/redis -v /data/redis/logs:/data/redis/logs -it hfy_redis:v7.0.7

+ 0 - 19
docker/restartServer.sh

@@ -1,19 +0,0 @@
-#!/bin/bash
-
-echo "欢迎使用本程序管控docker安装下的应用"
-while true; do
-    read -p "请选择要重启的应用 1:mysql 2:redis 3:国标平台 5:退出" choice
-    if [ "$choice" == "1" ]; then
-        source ./mysqlDocker/restart.sh
-    elif [ "$choice" == "2" ]; then
-        source ./redisDocker/restart.sh
-    elif [ "$choice" == "3" ]; then
-        source ./gbDocker/restart.sh
-    elif [ "$choice" == "5" ]; then
-        # 退出软件
-        break
-    else
-        echo "[ERR] 暂时未受支持的列表项,请重新输入!!!!!!!!!!!!"
-    fi
-done
-echo "ok"
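
The interactive restart menu above is being removed; the same effect can be had non-interactively. A sketch, using the hfy_redis and hfy_gp container names that appear elsewhere in this commit, plus a hypothetical hfy_mysql for the MySQL container:

```bash
#!/bin/bash
# Restart the platform containers without prompting.
# hfy_redis and hfy_gp are taken from other scripts in this commit;
# hfy_mysql is a placeholder -- replace it with the real MySQL container name.
for c in hfy_mysql hfy_redis hfy_gp; do
    docker restart "$c"
done
```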

+ 0 - 57
docker/test.sh

@@ -1,57 +0,0 @@
-#!/bin/bash
-
-
-# ---START---
-echo -e "\033[33m
----------------
------SZHFY-----
-----GB28181----
----------------
-\033[0m";
-echo  -e "\033[33m 欢迎使用深圳合方圆科技一键部署国标平台安装程序 \033[0m
-";
-# 询问是否读取配置文件来进行安装
-while true; do
-    read -p "请输入1选择配置文件安装,输入2选择手动输入配置文件:" choice
-    if [ "$choice" == "1" ]; then
-        # 使用配置文件进行安装
-        source ./config.cfg
-        break
-
-    elif [ "$choice" == "2" ]; then
-        # 手动输入配置文件进行安装
-        break
-    else
-        echo "未知的列表项,请重新输入"
-    fi
-done
-
-# 询问是否需要通过docker一键安装 mysql
-source ./mysqlDocker/install.sh "$choice"
-echo "
-              国标平台数据库信息
-              地址:$mysqlHost
-              端口:$mysqlPort
-              账号:$mysqlUser
-              密码:$mysqlPasswd
-              "
-
-source ./redisDocker/install.sh "$choice"
-echo "
-              国标平台Redis数据库信息
-              地址:$redisHost
-              端口:$redisPort
-              密码:$redisPasswd
-              "
-
-# source ./zlmDocker/install.sh "$choice"
-# echo "
-#               ZLM流媒体服务器信息 zlm配置完毕
-#               zlmIP:$mediaIp
-#               streamIP:$streamIP
-#               sdpIP:$sdpIP
-#               webPort:$hookPort
-#               mediaId:$mediaId
-#               "
-source ./gbDocker/install.sh "$choice"
-# 询问安装redis

+ 0 - 56
docker/zlmDocker/Dockerfile

@@ -1,56 +0,0 @@
-FROM ubuntu:20.04
-MAINTAINER kindring
-VOLUME "/data"
-# 安装基础依赖 参考 https://github.com/ZLMediaKit/ZLMediaKit/blob/master/docker/ubuntu18.04/Dockerfile.devel
-RUN mkdir /data/zlm && \
-    apt-get update && \
-         DEBIAN_FRONTEND="noninteractive" \
-         apt-get install -y --no-install-recommends \
-         build-essential \
-         cmake \
-         git \
-         curl \
-         vim \
-         ca-certificates \
-         tzdata \
-         libssl-dev \
-         libpng-dev \
-         libmysqlclient-dev \
-         libx264-dev \
-         libfaac-dev \
-         ffmpeg
-RUN apt-get autoremove -y && \
-    apt-get clean -y && \
-    rm -rf /var/lib/apt/lists/* \
-
-WORKDIR /data/zlm
-# 参考文章 https://blog.csdn.net/haysonzeng/article/details/116754065
-COPY ./openssl_1_1_1-stable ./openssl_1_1_1-stable
-COPY ./cisco-libsrtp ./cisco-libsrtp
-COPY ./ZLMediaKit ./ZLMediaKit
-
-# 编译 openssl
-WORKDIR ./openssl_1_1_1-stable
-RUN ./config shared threads no-ssl3 --prefix=/var/tmp/dest --openssldir=/var/tmp/dest
-RUN make -j4 &&\
-    make install
-
-# 编译 libsrtp
-WORKDIR ../cisco-libsrtp
-RUN ./configure --enable-openssl \
-&& make -j4 \
-&& make install
-
-# 编译ZLMediakit
-
-
-WORKDIR ../ZLMediaKit
-RUN git submodule update --init
-RUN mkdir build
-WORKDIR  ./build
-RUN cmake -DENABLE_WEBRTC=on -DCMAKE_BUILD_TYPE=Release ../ \
-&& make
-ENV PATH /data/zlm/ZLMediakit/release/linux/Release:$PATH
-CMD MediaServer
-
-
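
For reference, building and running the removed ZLMediaKit image follows the usual Docker flow. A sketch, assuming the build runs from the docker/ directory, that the source trees referenced by the COPY lines exist in the build context, and that host networking is acceptable (the config.ini removed below binds many ports, so host networking avoids a long list of -p flags):

```bash
#!/bin/bash
# Build the ZLM image from the Dockerfile above (context: docker/zlmDocker).
docker build -t hfy_zlm:latest ./zlmDocker
# Run with host networking so the HTTP/RTSP/RTMP/RTP ports configured in
# config.ini (15070, 11554, 11935, 30000-30500, ...) are reachable directly.
docker run -d --name hfy_zlm --net=host hfy_zlm:latest
```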

+ 0 - 159
docker/zlmDocker/config.ini

@@ -1,159 +0,0 @@
-; auto-generated by mINI class {
-
-[api]
-apiDebug=1
-defaultSnap=./www/logo.png
-secret=035c73f7-bb6b-4889-a715-d9eb2d1925cc
-snapRoot=./www/snap/
-
-[cluster]
-origin_url=
-retry_count=3
-timeout_sec=15
-
-[ffmpeg]
-bin=/usr/local/bin/ffmpeg
-cmd=%s -fflags nobuffer -i %s -c:a aac -strict -2 -ar 44100 -ab 48k -c:v libx264  -f flv %s
-log=./ffmpeg/ffmpeg.log
-restart_sec=0
-snap=%s -i %s -y -f mjpeg -t 0.001 %s
-
-[general]
-addMuteAudio=1
-check_nvidia_dev=1
-continue_push_ms=3000
-enableVhost=0
-enable_audio=0
-enable_ffmpeg_log=0
-flowThreshold=1024
-fmp4_demand=0
-hls_demand=0
-maxStreamWaitMS=15000
-mediaServerId=your_server_id
-mergeWriteMS=0
-modifyStamp=0
-publishToHls=1
-publishToMP4=0
-resetWhenRePlay=1
-rtmp_demand=0
-rtsp_demand=0
-streamNoneReaderDelayMS=60000
-ts_demand=0
-unready_frame_cache=100
-wait_add_track_ms=3000
-wait_track_ready_ms=10000
-
-[hls]
-broadcastRecordTs=0
-deleteDelaySec=0
-fileBufSize=65536
-filePath=./www
-segDur=2
-segKeep=0
-segNum=3
-segRetain=5
-
-[hook]
-admin_params=secret=035c73f7-bb6b-4889-a715-d9eb2d1925cc
-alive_interval=10.0
-enable=1
-on_flow_report=http://192.168.1.21:29001/index/hook/on_flow_report
-on_http_access=http://192.168.1.21:29001/index/hook/on_http_access
-on_play=http://192.168.1.21:29001/index/hook/on_play
-on_publish=http://192.168.1.21:29001/index/hook/on_publish
-on_record_mp4=
-on_record_ts=http://192.168.1.21:29001/index/hook/on_record_ts
-on_rtsp_auth=http://192.168.1.21:29001/index/hook/on_rtsp_auth
-on_rtsp_realm=http://192.168.1.21:29001/index/hook/on_rtsp_realm
-on_send_rtp_stopped=http://192.168.1.21:29001/index/hook/on_send_rtp_stopped
-on_server_keepalive=http://192.168.1.21:29001/index/hook/on_server_keepalive
-on_server_started=http://192.168.1.21:29001/index/hook/on_server_started
-on_shell_login=http://192.168.1.21:29001/index/hook/on_shell_login
-on_stream_changed=http://192.168.1.21:29001/index/hook/on_stream_changed
-on_stream_none_reader=http://192.168.1.21:29001/index/hook/on_stream_none_reader
-on_stream_not_found=http://192.168.1.21:29001/index/hook/on_stream_not_found
-retry=1
-retry_delay=3.0
-timeoutSec=20
-
-[http]
-charSet=utf-8
-dirMenu=1
-forbidCacheSuffix=
-forwarded_ip_header=
-keepAliveSecond=30
-maxReqSize=40960
-notFound=<html><head><title>404 Not Found</title></head><body bgcolor="white"><center><h1>您访问的资源不存在!</h1></center><hr><center>ZLMediaKit(git hash:cf5527e,branch:master,build time:Oct 12 2022 11:31:51)</center></body></html>
-port=15070
-rootPath=./www
-sendBufSize=65536
-sslport=29010
-virtualPath=
-
-[multicast]
-addrMax=239.255.255.255
-addrMin=239.0.0.0
-udpTTL=64
-
-[record]
-appName=record
-fastStart=0
-fileBufSize=65536
-filePath=./www
-fileRepeat=0
-fileSecond=3600
-mp4_as_player=0
-sampleMS=500
-
-[rtc]
-externIP=192.168.1.203
-port=29100
-preferredCodecA=PCMU,PCMA,opus,mpeg4-generic
-preferredCodecV=H264,H265,AV1X,VP9,VP8
-rembBitRate=0
-timeoutSec=15
-
-[rtmp]
-handshakeSecond=15
-keepAliveSecond=15
-modifyStamp=0
-port=11935
-sslport=0
-
-[rtp]
-audioMtuSize=600
-rtpMaxSize=10
-videoMtuSize=1400
-
-[rtp_proxy]
-dumpDir=
-g711a_pt=8
-g711u_pt=0
-h264_pt=98
-h265_pt=99
-opus_pt=100
-port=30000
-port_range=30000-30500
-ps_pt=96
-timeoutSec=15
-ts_pt=33
-
-[rtsp]
-authBasic=0
-directProxy=1
-handshakeSecond=15
-keepAliveSecond=15
-port=11554
-sslport=0
-
-[shell]
-maxReqSize=1024
-port=0
-
-[srt]
-latencyMul=4
-pktBufSize=8192
-port=29101
-timeoutSec=5
-
-; } ---
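
All hook URLs in the removed config.ini point at a hard-coded 192.168.1.21:29001, so a change of platform address means editing every on_* entry. A one-pass sketch (NEW_HOST is a placeholder):

```bash
# Rewrite every ZLM hook URL to the new platform address in one pass.
sed -i 's#http://192.168.1.21:29001#http://NEW_HOST:29001#g' config.ini
```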

+ 0 - 116
docker/zlmDocker/install.sh

@@ -1,116 +0,0 @@
-#!/bin/bash -e
-# zlm install
-SCRIPT_DIR=$(cd $(dirname ${BASH_SOURCE[0]}); pwd)
-function installZlm (){
-  echo "欢迎本 ZLMediakit 安装程序"
-  _useConfig=$1
-  _installMode=$2
-  w1=1
-  cd $SCRIPT_DIR
-  while [[ $w1 == 1 ]]
-  do
-    clear
-
-    echo "开始安装配置redis,请按照提示输入信息"
-    _MediaIP="0.0.0.0"
-    _SdpIP=""
-    _HookPort="15070"
-    _MediaId="hfy_zlm_media_id"
-    _Secret="035c73f7-bb6b-4889-a715-d9eb2d1925cc"
-    _RtpPortStart=30000
-    _RtpPortEnd=35000
-    if [  "$_useConfig" == "1" ]; then
-      echo "使用配置文件进行安装"
-      mediaIP=$media_host
-      streamIP=${media_streamHost:-"$mediaIP"}
-      sdpIP=${media_sdpHost:-"$mediaIP"}
-      mediaId=$media_id
-      mediaSecret=$media_Secret
-      hookPort=$media_webPort
-      rtpPortStart=$media_rtpPortStart
-      rtpPortEnd=$media_rtpPortEnd
-      w1=2
-    else
-      read -p "请输入zlm所使用的IP地址($_MediaIP): " mediaIP
-      mediaIP=${mediaIP:-"$_MediaIP"}
-      read -p "请输入zlm返回流所时的IP地址($_MediaIP): " streamIP
-      streamIP=${streamIP:"$_MediaIP"}
-      read -p "请输入ZLM的sdpId($_MediaIP): " sdpIP
-      sdpIP=${sdpIP:-"$_MediaIP"}
-      read -p "请输入ZLM的web端口($_HookPort): " hookPort
-      hookPort=${hookPort:-_HookPort}
-      read -p "请输入ZLM的rtp的开始端口(_RtpPortStart): " rtpPortStart
-      rtpPortStart=${rtpPortStart:-_RtpPortStart}
-      read -p "请输入ZLM的rtp的开始端口(_RtpPortEnd): " rtpPortEnd
-      rtpPortEnd=${rtpPortEnd:-_RtpPortEnd}
-      read -p "请输入ZLM的mediaId($_MediaId): " mediaId
-      mediaId=${mediaId:-_MediaId}
-      read -p "请输入ZLM的secret密钥($_Secret): " mediaSecret
-      mediaSecret=${mediaSecret:-_Secret}
-      w2=1
-      while [[ $w2 == 1 ]]
-      do
-        echo "
-                ZLM流媒体服务器信息
-                zlmIP:$mediaIP
-                streamIP:$streamIP
-                sdpIP:$sdpIP
-                webPort:$hookPort
-                mediaId:$mediaId
-                secret密钥: $mediaSecret
-                rtp端口范围:$rtpPortStart-$rtpPortEnd
-                "
-        echo "是否确认为此信息?y/n"
-        read _yn
-        if [[ $_yn == "y" ]] || [[ $_yn == "yes" ]] || [[ $_yn == "Y" ]] || [[ $_yn == "YES" ]] ; then
-          echo "确认信息";
-          w1=2;
-          break;
-        elif [[ $_yn == "n" ]] || [[ $_yn == "n" ]] || [[ $_yn == "N" ]] || [[ $_yn == "NO" ]] ; then
-          echo "重新输入"
-          w2=2;
-        else
-          echo "---------------------"
-        fi
-      done
-    fi
-  done
-  echo "zlm信息确认完成"
-
-  if [ $_installMode == 2 ];then
-    echo "导出密码"
-    return 0
-  fi
-}
-
-echo -e "\033[35m 是否通过本安装程序来安装ZLM? \033[0m";
-PS3="请输入数字来选择安装方式:";
-selectOption_1="自动安装并配置 ZLMediakit [未支持!]";
-selectOption_2="已有 ZLM ,手动输入 ZLM 相关信息";
-selectOption_3="手动修改配置文件来输入zlm相关参数";
-
-select=("$selectOption_1" "$selectOption_2" "$selectOption_3")
-installMode=$1
-installMode=${installMode:-2}
-select fav in "${select[@]}";do
-  case $fav in
-    "$selectOption_1")
-                echo "暂未支持此方法 $fav"
-#                installZlm "$installMode" 1
-#                break;
-                ;;
-    "$selectOption_2")
-                echo "已选择 $fav "
-                installZlm "$installMode" 2
-                break;
-                ;;
-    "$selectOption_3")
-                echo "已选择 $fav "
-                break;
-                ;;
-    *)
-      echo -e "\033[37m 请输入数字 1 2 3 来选择 ZLM 的安装方式 \033[0m"
-      ;;
-  esac
-done
-cd ../
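
Note that several default-value substitutions in the removed install.sh drop a character: streamIP=${streamIP:"$_MediaIP"} is missing the hyphen, and forms such as hookPort=${hookPort:-_HookPort} are missing the dollar sign, so the literal text _HookPort would become the value. A corrected sketch of that prompt block, using standard ${var:-default} expansion (defaults copied from the script above, prompts shortened to English):

```bash
#!/bin/bash
_MediaIP="0.0.0.0"; _HookPort="15070"; _RtpPortStart=30000; _RtpPortEnd=35000
# Defaults survive an empty answer only with the ${var:-default} form.
read -p "ZLM IP address ($_MediaIP): " mediaIP
mediaIP=${mediaIP:-"$_MediaIP"}
read -p "ZLM stream/return IP ($_MediaIP): " streamIP
streamIP=${streamIP:-"$_MediaIP"}        # original omitted the '-'
read -p "ZLM web/hook port ($_HookPort): " hookPort
hookPort=${hookPort:-"$_HookPort"}       # original omitted the '$'
read -p "ZLM RTP start port ($_RtpPortStart): " rtpPortStart
rtpPortStart=${rtpPortStart:-"$_RtpPortStart"}
read -p "ZLM RTP end port ($_RtpPortEnd): " rtpPortEnd
rtpPortEnd=${rtpPortEnd:-"$_RtpPortEnd"}
```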

+ 3 - 0
package/config.cfg

@@ -51,3 +51,6 @@ gb_aiPicPath="mFile"
 
 # 点播超时时间
 gb_play_timeout=30000
+
+
+

+ 6 - 2
package/startDockerImage.sh

@@ -240,8 +240,7 @@ if [ $? -eq 0 ]; then
     docker rm hfy_gp
 fi
 
-
-
+echo -e "\033[33m自动按照sql与redis配置已经自动关闭\033[0m"
 
 sudo docker run -it --net=host  \
  -v /data/gb:/data/gb -e LANG=C.UTF-8 \
@@ -253,4 +252,9 @@ sudo docker run -it --net=host  \
 if [ $? -ne 0 ]; then
   echo "run gb docker failed";
 fi
+
+# Update the GB28181 platform config file: set enable_redis and enable_sql to 0 so the SQL data is not re-initialised on later starts
+sed -i "s/enable_redis=1/enable_redis=0/g" "$SCRIPT_DIR/config.cfg"
+sed -i "s/enable_sql=1/enable_sql=0/g" "$SCRIPT_DIR/config.cfg"
+
 echo -e "\033[33m国标平台以全部启动,请使用浏览器访问 http://$gb_host:$gb_WebPort\033[0m";
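
The sed lines added above turn enable_redis and enable_sql off after the first successful start so the database and cache are not re-initialised on every run. If a fresh initialisation is ever needed again, the same mechanism can be reversed; a sketch, assuming config.cfg keeps the enable_sql/enable_redis keys:

```bash
#!/bin/bash
# Re-enable the one-time SQL/Redis initialisation before the next start.
SCRIPT_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
sed -i "s/enable_sql=0/enable_sql=1/g"     "$SCRIPT_DIR/config.cfg"
sed -i "s/enable_redis=0/enable_redis=1/g" "$SCRIPT_DIR/config.cfg"
```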

+ 0 - 0
package/updateServer.sh


+ 7 - 2
src/main/java/com/genersoft/iot/vmp/gb28181/transmit/event/request/impl/message/response/cmd/PresetQueryResponseMessageHandler.java

@@ -7,6 +7,8 @@ import com.genersoft.iot.vmp.gb28181.transmit.callback.RequestMessage;
 import com.genersoft.iot.vmp.gb28181.transmit.event.request.SIPRequestProcessorParent;
 import com.genersoft.iot.vmp.gb28181.transmit.event.request.impl.message.IMessageHandler;
 import com.genersoft.iot.vmp.gb28181.transmit.event.request.impl.message.response.ResponseMessageHandler;
+import com.genersoft.iot.vmp.vmanager.bean.ErrorCode;
+import com.genersoft.iot.vmp.vmanager.bean.WVPResult;
 import gov.nist.javax.sip.message.SIPRequest;
 import org.dom4j.DocumentException;
 import org.dom4j.Element;
@@ -55,7 +57,7 @@ public class PresetQueryResponseMessageHandler extends SIPRequestProcessorParent
 
         try {
              Element rootElement = getRootElement(evt, device.getCharset());
-
+            logger.info("[预置位查询结果] 设备预置位查询应答处理");
             if (rootElement == null) {
                 logger.warn("[ 设备预置位查询应答 ] content cannot be null, {}", evt.getRequest());
                 try {
@@ -99,9 +101,12 @@ public class PresetQueryResponseMessageHandler extends SIPRequestProcessorParent
                 }
             }
             RequestMessage requestMessage = new RequestMessage();
+            RequestMessage msg = new RequestMessage();
             requestMessage.setKey(key);
-            requestMessage.setData(presetQuerySipReqList);
+            requestMessage.setData(WVPResult.success(presetQuerySipReqList));
+            logger.info("[预置位查询结果] 设备预置位查询应答处理完成,预置位数量:{}", sumNum);
             deferredResultHolder.invokeAllResult(requestMessage);
+//            resultHolder.invokeAllResult(msg);
             try {
                 responseAck(request, Response.OK);
             } catch (InvalidArgumentException | ParseException | SipException e) {

+ 14 - 5
src/main/java/com/genersoft/iot/vmp/vmanager/gb28181/ptz/PtzController.java

@@ -3,11 +3,13 @@ package com.genersoft.iot.vmp.vmanager.gb28181.ptz;
  
 import com.genersoft.iot.vmp.conf.exception.ControllerException;
 import com.genersoft.iot.vmp.gb28181.bean.Device;
+import com.genersoft.iot.vmp.gb28181.bean.PresetQuerySipReq;
 import com.genersoft.iot.vmp.gb28181.transmit.callback.DeferredResultHolder;
 import com.genersoft.iot.vmp.gb28181.transmit.callback.RequestMessage;
 import com.genersoft.iot.vmp.gb28181.transmit.cmd.impl.SIPCommander;
 import com.genersoft.iot.vmp.storager.IVideoManagerStorage;
 import com.genersoft.iot.vmp.vmanager.bean.ErrorCode;
+import com.genersoft.iot.vmp.vmanager.bean.WVPResult;
 import io.swagger.v3.oas.annotations.Operation;
 import io.swagger.v3.oas.annotations.Parameter;
 import io.swagger.v3.oas.annotations.tags.Tag;
@@ -22,6 +24,7 @@ import org.springframework.web.context.request.async.DeferredResult;
 import javax.sip.InvalidArgumentException;
 import javax.sip.SipException;
 import java.text.ParseException;
+import java.util.List;
 import java.util.UUID;
 
 @Tag(name  = "云台控制")
@@ -203,22 +206,26 @@ public class PtzController {
 	@Parameter(name = "deviceId", description = "设备国标编号", required = true)
 	@Parameter(name = "channelId", description = "通道国标编号", required = true)
 	@GetMapping("/preset/query/{deviceId}/{channelId}")
-	public DeferredResult<String> presetQueryApi(@PathVariable String deviceId, @PathVariable String channelId) {
+	public DeferredResult<WVPResult<List<PresetQuerySipReq>>> presetQueryApi(@PathVariable String deviceId, @PathVariable String channelId) {
 		if (logger.isDebugEnabled()) {
 			logger.debug("设备预置位查询API调用");
 		}
 		logger.info("[预置位查询] 获取设备预置位信息");
 		Device device = storager.queryVideoDevice(deviceId);
 		String uuid =  UUID.randomUUID().toString();
-		String key =  DeferredResultHolder.CALLBACK_CMD_PRESETQUERY + (ObjectUtils.isEmpty(channelId) ? deviceId : channelId);
-		DeferredResult<String> result = new DeferredResult<String> (3 * 1000L);
+		// channelId was used here at first, but some devices have no channelId, so the key is now built from deviceId
+		String key =  DeferredResultHolder.CALLBACK_CMD_PRESETQUERY + deviceId;
+		DeferredResult<WVPResult<List<PresetQuerySipReq>>> result = new DeferredResult<WVPResult<List<PresetQuerySipReq>>> (15 * 1000L);
+		WVPResult wvpResult = new WVPResult();
 		result.onTimeout(()->{
 			logger.warn(String.format("获取设备预置位超时"));
 			// 释放rtpserver
 			RequestMessage msg = new RequestMessage();
 			msg.setId(uuid);
 			msg.setKey(key);
-			msg.setData("获取设备预置位超时");
+			wvpResult.setCode(ErrorCode.ERROR404.getCode());
+			wvpResult.setMsg("获取设备预置位超时");
+			msg.setData(wvpResult);
 			resultHolder.invokeResult(msg);
 		});
 		if (resultHolder.exist(key, null)) {
@@ -230,7 +237,9 @@ public class PtzController {
 				RequestMessage msg = new RequestMessage();
 				msg.setId(uuid);
 				msg.setKey(key);
-				msg.setData(String.format("获取设备预置位失败,错误码: %s, %s", event.statusCode, event.msg));
+				wvpResult.setCode(ErrorCode.ERROR404.getCode());
+				wvpResult.setMsg(String.format("获取设备预置位失败,错误码: %s, %s", event.statusCode, event.msg));
+				msg.setData(wvpResult);
 				resultHolder.invokeResult(msg);
 			});
 		} catch (InvalidArgumentException | SipException | ParseException e) {
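
With the handler and controller changes above, the preset query endpoint now answers with a WVPResult envelope wrapping the PresetQuerySipReq list, is keyed by deviceId, and times out after 15 s instead of 3 s. A quick smoke-test sketch; the /api/ptz base path and the example device/channel IDs are assumptions:

```bash
# Query the presets of a device; gb_host / gb_WebPort as used in startDockerImage.sh above.
curl -s "http://$gb_host:$gb_WebPort/api/ptz/preset/query/34020000001320000001/34020000001310000001"
# Expected on success: a JSON envelope (code/msg/data) whose data array holds the
# presets; the Vue table below binds their ind and remark fields.
# On timeout or SIP failure the same envelope carries ERROR404 and a message.
```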

+ 92 - 236
web_src/package-lock.json

@@ -15,6 +15,7 @@
         "element-ui": "^2.15.6",
         "fingerprintjs2": "^2.1.2",
         "flv.js": "^1.6.2",
+        "kind-form-verify": "git+http://kindring.cn:9123/kindring/FormData.git",
         "moment": "^2.29.1",
         "mpegts.js": "^1.7.3",
         "ol": "^6.14.1",
@@ -186,15 +187,19 @@
       }
     },
     "node_modules/ajv": {
-      "version": "5.5.2",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-5.5.2.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-5.5.2.tgz",
-      "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=",
+      "version": "6.12.6",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+      "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
       "dev": true,
       "dependencies": {
-        "co": "^4.6.0",
-        "fast-deep-equal": "^1.0.0",
+        "fast-deep-equal": "^3.1.1",
         "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.3.0"
+        "json-schema-traverse": "^0.4.1",
+        "uri-js": "^4.2.2"
+      },
+      "funding": {
+        "type": "github",
+        "url": "https://github.com/sponsors/epoberezkin"
       }
     },
     "node_modules/ajv-keywords": {
@@ -2143,8 +2148,8 @@
     },
     "node_modules/co": {
       "version": "4.6.0",
-      "resolved": "https://registry.npm.taobao.org/co/download/co-4.6.0.tgz",
-      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
       "dev": true,
       "engines": {
         "iojs": ">= 1.0.0",
@@ -4692,9 +4697,9 @@
       }
     },
     "node_modules/fast-deep-equal": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-1.1.0.tgz",
-      "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=",
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+      "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
       "dev": true
     },
     "node_modules/fast-json-stable-stringify": {
@@ -4737,34 +4742,6 @@
         "webpack": "^2.0.0 || ^3.0.0 || ^4.0.0"
       }
     },
-    "node_modules/file-loader/node_modules/ajv": {
-      "version": "6.12.5",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-      "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-      "dev": true,
-      "dependencies": {
-        "fast-deep-equal": "^3.1.1",
-        "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.4.1",
-        "uri-js": "^4.2.2"
-      },
-      "funding": {
-        "type": "github",
-        "url": "https://github.com/sponsors/epoberezkin"
-      }
-    },
-    "node_modules/file-loader/node_modules/fast-deep-equal": {
-      "version": "3.1.3",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-      "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-      "dev": true
-    },
-    "node_modules/file-loader/node_modules/json-schema-traverse": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-      "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-      "dev": true
-    },
     "node_modules/file-loader/node_modules/schema-utils": {
       "version": "0.4.7",
       "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -6273,9 +6250,9 @@
       "dev": true
     },
     "node_modules/json-schema-traverse": {
-      "version": "0.3.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.3.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.3.1.tgz",
-      "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+      "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
       "dev": true
     },
     "node_modules/json-stringify-pretty-compact": {
@@ -6304,6 +6281,11 @@
       "integrity": "sha1-TIzkQRh6Bhx0dPuHygjipjgZSJI=",
       "dev": true
     },
+    "node_modules/kind-form-verify": {
+      "version": "1.0.2",
+      "resolved": "git+http://kindring.cn:9123/kindring/FormData.git#4ca5863074484ad3d8a9674c1c0c92bdf6e6ee55",
+      "license": "MIT"
+    },
     "node_modules/kind-of": {
       "version": "3.2.2",
       "resolved": "https://registry.npm.taobao.org/kind-of/download/kind-of-3.2.2.tgz",
@@ -8960,34 +8942,6 @@
         "node": ">= 4"
       }
     },
-    "node_modules/postcss-loader/node_modules/ajv": {
-      "version": "6.12.5",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-      "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-      "dev": true,
-      "dependencies": {
-        "fast-deep-equal": "^3.1.1",
-        "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.4.1",
-        "uri-js": "^4.2.2"
-      },
-      "funding": {
-        "type": "github",
-        "url": "https://github.com/sponsors/epoberezkin"
-      }
-    },
-    "node_modules/postcss-loader/node_modules/fast-deep-equal": {
-      "version": "3.1.3",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-      "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-      "dev": true
-    },
-    "node_modules/postcss-loader/node_modules/json-schema-traverse": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-      "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-      "dev": true
-    },
     "node_modules/postcss-loader/node_modules/schema-utils": {
       "version": "0.4.7",
       "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -11770,6 +11724,30 @@
         "node": ">= 4.3 < 5.0.0 || >= 5.10"
       }
     },
+    "node_modules/schema-utils/node_modules/ajv": {
+      "version": "5.5.2",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz",
+      "integrity": "sha512-Ajr4IcMXq/2QmMkEmSvxqfLN5zGmJ92gHXAeOXq1OekoH2rfDNsgdDoL2f7QaRCy7G/E6TpxBVdRuNraMztGHw==",
+      "dev": true,
+      "dependencies": {
+        "co": "^4.6.0",
+        "fast-deep-equal": "^1.0.0",
+        "fast-json-stable-stringify": "^2.0.0",
+        "json-schema-traverse": "^0.3.0"
+      }
+    },
+    "node_modules/schema-utils/node_modules/fast-deep-equal": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz",
+      "integrity": "sha512-fueX787WZKCV0Is4/T2cyAdM4+x1S3MXXOAhavE1ys/W42SHAPacLTQhucja22QBYrfGw50M2sRiXPtTGv9Ymw==",
+      "dev": true
+    },
+    "node_modules/schema-utils/node_modules/json-schema-traverse": {
+      "version": "0.3.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz",
+      "integrity": "sha512-4JD/Ivzg7PoW8NzdrBSr3UFwC9mHgvI7Z6z3QGBsSHgKaRTUDmyZAAKJo2UbG1kUVfS9WS8bi36N49U1xw43DA==",
+      "dev": true
+    },
     "node_modules/select": {
       "version": "1.1.2",
       "resolved": "https://registry.npm.taobao.org/select/download/select-1.1.2.tgz",
@@ -13001,40 +12979,12 @@
         "webpack": "^2.0.0 || ^3.0.0 || ^4.0.0"
       }
     },
-    "node_modules/uglifyjs-webpack-plugin/node_modules/ajv": {
-      "version": "6.12.5",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-      "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-      "dev": true,
-      "dependencies": {
-        "fast-deep-equal": "^3.1.1",
-        "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.4.1",
-        "uri-js": "^4.2.2"
-      },
-      "funding": {
-        "type": "github",
-        "url": "https://github.com/sponsors/epoberezkin"
-      }
-    },
     "node_modules/uglifyjs-webpack-plugin/node_modules/commander": {
       "version": "2.13.0",
       "resolved": "https://registry.npm.taobao.org/commander/download/commander-2.13.0.tgz?cache=0&sync_timestamp=1598576136669&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fcommander%2Fdownload%2Fcommander-2.13.0.tgz",
       "integrity": "sha1-aWS8pnaF33wfFDDFhPB9dZeIW5w=",
       "dev": true
     },
-    "node_modules/uglifyjs-webpack-plugin/node_modules/fast-deep-equal": {
-      "version": "3.1.3",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-      "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-      "dev": true
-    },
-    "node_modules/uglifyjs-webpack-plugin/node_modules/json-schema-traverse": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-      "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-      "dev": true
-    },
     "node_modules/uglifyjs-webpack-plugin/node_modules/schema-utils": {
       "version": "0.4.7",
       "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -14376,28 +14326,6 @@
         "source-map": "~0.6.1"
       }
     },
-    "node_modules/webpack/node_modules/ajv": {
-      "version": "6.12.5",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-      "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-      "dev": true,
-      "dependencies": {
-        "fast-deep-equal": "^3.1.1",
-        "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.4.1",
-        "uri-js": "^4.2.2"
-      },
-      "funding": {
-        "type": "github",
-        "url": "https://github.com/sponsors/epoberezkin"
-      }
-    },
-    "node_modules/webpack/node_modules/fast-deep-equal": {
-      "version": "3.1.3",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-      "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-      "dev": true
-    },
     "node_modules/webpack/node_modules/has-flag": {
       "version": "2.0.0",
       "resolved": "https://registry.npm.taobao.org/has-flag/download/has-flag-2.0.0.tgz",
@@ -14407,12 +14335,6 @@
         "node": ">=0.10.0"
       }
     },
-    "node_modules/webpack/node_modules/json-schema-traverse": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-      "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-      "dev": true
-    },
     "node_modules/webpack/node_modules/source-map": {
       "version": "0.5.7",
       "resolved": "https://registry.npm.taobao.org/source-map/download/source-map-0.5.7.tgz",
@@ -14857,15 +14779,15 @@
       }
     },
     "ajv": {
-      "version": "5.5.2",
-      "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-5.5.2.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-5.5.2.tgz",
-      "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=",
+      "version": "6.12.6",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+      "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
       "dev": true,
       "requires": {
-        "co": "^4.6.0",
-        "fast-deep-equal": "^1.0.0",
+        "fast-deep-equal": "^3.1.1",
         "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.3.0"
+        "json-schema-traverse": "^0.4.1",
+        "uri-js": "^4.2.2"
       }
     },
     "ajv-keywords": {
@@ -16609,8 +16531,8 @@
     },
     "co": {
       "version": "4.6.0",
-      "resolved": "https://registry.npm.taobao.org/co/download/co-4.6.0.tgz",
-      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ==",
       "dev": true
     },
     "coa": {
@@ -18734,9 +18656,9 @@
       }
     },
     "fast-deep-equal": {
-      "version": "1.1.0",
-      "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-1.1.0.tgz",
-      "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=",
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+      "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
       "dev": true
     },
     "fast-json-stable-stringify": {
@@ -18770,30 +18692,6 @@
         "schema-utils": "^0.4.5"
       },
       "dependencies": {
-        "ajv": {
-          "version": "6.12.5",
-          "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-          "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-          "dev": true,
-          "requires": {
-            "fast-deep-equal": "^3.1.1",
-            "fast-json-stable-stringify": "^2.0.0",
-            "json-schema-traverse": "^0.4.1",
-            "uri-js": "^4.2.2"
-          }
-        },
-        "fast-deep-equal": {
-          "version": "3.1.3",
-          "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-          "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-          "dev": true
-        },
-        "json-schema-traverse": {
-          "version": "0.4.1",
-          "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-          "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-          "dev": true
-        },
         "schema-utils": {
           "version": "0.4.7",
           "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -19968,9 +19866,9 @@
       "dev": true
     },
     "json-schema-traverse": {
-      "version": "0.3.1",
-      "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.3.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.3.1.tgz",
-      "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=",
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+      "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
       "dev": true
     },
     "json-stringify-pretty-compact": {
@@ -19996,6 +19894,10 @@
       "integrity": "sha1-TIzkQRh6Bhx0dPuHygjipjgZSJI=",
       "dev": true
     },
+    "kind-form-verify": {
+      "version": "git+http://kindring.cn:9123/kindring/FormData.git#4ca5863074484ad3d8a9674c1c0c92bdf6e6ee55",
+      "from": "kind-form-verify@git+http://kindring.cn:9123/kindring/FormData.git"
+    },
     "kind-of": {
       "version": "3.2.2",
       "resolved": "https://registry.npm.taobao.org/kind-of/download/kind-of-3.2.2.tgz",
@@ -22151,30 +22053,6 @@
         "schema-utils": "^0.4.0"
       },
       "dependencies": {
-        "ajv": {
-          "version": "6.12.5",
-          "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-          "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-          "dev": true,
-          "requires": {
-            "fast-deep-equal": "^3.1.1",
-            "fast-json-stable-stringify": "^2.0.0",
-            "json-schema-traverse": "^0.4.1",
-            "uri-js": "^4.2.2"
-          }
-        },
-        "fast-deep-equal": {
-          "version": "3.1.3",
-          "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-          "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-          "dev": true
-        },
-        "json-schema-traverse": {
-          "version": "0.4.1",
-          "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-          "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-          "dev": true
-        },
         "schema-utils": {
           "version": "0.4.7",
           "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -24426,6 +24304,32 @@
       "dev": true,
       "requires": {
         "ajv": "^5.0.0"
+      },
+      "dependencies": {
+        "ajv": {
+          "version": "5.5.2",
+          "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz",
+          "integrity": "sha512-Ajr4IcMXq/2QmMkEmSvxqfLN5zGmJ92gHXAeOXq1OekoH2rfDNsgdDoL2f7QaRCy7G/E6TpxBVdRuNraMztGHw==",
+          "dev": true,
+          "requires": {
+            "co": "^4.6.0",
+            "fast-deep-equal": "^1.0.0",
+            "fast-json-stable-stringify": "^2.0.0",
+            "json-schema-traverse": "^0.3.0"
+          }
+        },
+        "fast-deep-equal": {
+          "version": "1.1.0",
+          "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz",
+          "integrity": "sha512-fueX787WZKCV0Is4/T2cyAdM4+x1S3MXXOAhavE1ys/W42SHAPacLTQhucja22QBYrfGw50M2sRiXPtTGv9Ymw==",
+          "dev": true
+        },
+        "json-schema-traverse": {
+          "version": "0.3.1",
+          "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz",
+          "integrity": "sha512-4JD/Ivzg7PoW8NzdrBSr3UFwC9mHgvI7Z6z3QGBsSHgKaRTUDmyZAAKJo2UbG1kUVfS9WS8bi36N49U1xw43DA==",
+          "dev": true
+        }
       }
     },
     "select": {
@@ -25445,36 +25349,12 @@
         "worker-farm": "^1.5.2"
       },
       "dependencies": {
-        "ajv": {
-          "version": "6.12.5",
-          "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-          "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-          "dev": true,
-          "requires": {
-            "fast-deep-equal": "^3.1.1",
-            "fast-json-stable-stringify": "^2.0.0",
-            "json-schema-traverse": "^0.4.1",
-            "uri-js": "^4.2.2"
-          }
-        },
         "commander": {
           "version": "2.13.0",
           "resolved": "https://registry.npm.taobao.org/commander/download/commander-2.13.0.tgz?cache=0&sync_timestamp=1598576136669&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fcommander%2Fdownload%2Fcommander-2.13.0.tgz",
           "integrity": "sha1-aWS8pnaF33wfFDDFhPB9dZeIW5w=",
           "dev": true
         },
-        "fast-deep-equal": {
-          "version": "3.1.3",
-          "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-          "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-          "dev": true
-        },
-        "json-schema-traverse": {
-          "version": "0.4.1",
-          "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-          "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-          "dev": true
-        },
         "schema-utils": {
           "version": "0.4.7",
           "resolved": "https://registry.npm.taobao.org/schema-utils/download/schema-utils-0.4.7.tgz?cache=0&sync_timestamp=1601922251376&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fschema-utils%2Fdownload%2Fschema-utils-0.4.7.tgz",
@@ -26175,36 +26055,12 @@
         "yargs": "^8.0.2"
       },
       "dependencies": {
-        "ajv": {
-          "version": "6.12.5",
-          "resolved": "https://registry.npm.taobao.org/ajv/download/ajv-6.12.5.tgz?cache=0&sync_timestamp=1600886864349&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fajv%2Fdownload%2Fajv-6.12.5.tgz",
-          "integrity": "sha1-GbDouuj0duW6ZmMAOHd1+xoApNo=",
-          "dev": true,
-          "requires": {
-            "fast-deep-equal": "^3.1.1",
-            "fast-json-stable-stringify": "^2.0.0",
-            "json-schema-traverse": "^0.4.1",
-            "uri-js": "^4.2.2"
-          }
-        },
-        "fast-deep-equal": {
-          "version": "3.1.3",
-          "resolved": "https://registry.npm.taobao.org/fast-deep-equal/download/fast-deep-equal-3.1.3.tgz",
-          "integrity": "sha1-On1WtVnWy8PrUSMlJE5hmmXGxSU=",
-          "dev": true
-        },
         "has-flag": {
           "version": "2.0.0",
           "resolved": "https://registry.npm.taobao.org/has-flag/download/has-flag-2.0.0.tgz",
           "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=",
           "dev": true
         },
-        "json-schema-traverse": {
-          "version": "0.4.1",
-          "resolved": "https://registry.npm.taobao.org/json-schema-traverse/download/json-schema-traverse-0.4.1.tgz?cache=0&sync_timestamp=1599334207614&other_urls=https%3A%2F%2Fregistry.npm.taobao.org%2Fjson-schema-traverse%2Fdownload%2Fjson-schema-traverse-0.4.1.tgz",
-          "integrity": "sha1-afaofZUTq4u4/mO9sJecRI5oRmA=",
-          "dev": true
-        },
         "source-map": {
           "version": "0.5.7",
           "resolved": "https://registry.npm.taobao.org/source-map/download/source-map-0.5.7.tgz",

+ 1 - 0
web_src/package.json

@@ -17,6 +17,7 @@
     "element-ui": "^2.15.6",
     "fingerprintjs2": "^2.1.2",
     "flv.js": "^1.6.2",
+    "kind-form-verify": "git+http://kindring.cn:9123/kindring/FormData.git",
     "moment": "^2.29.1",
     "mpegts.js": "^1.7.3",
     "ol": "^6.14.1",

+ 2 - 0
web_src/src/components/GBRecordDetail.vue

@@ -105,6 +105,7 @@
   import player from './common/jessibuca.vue'
   import moment  from 'moment'
   import recordDownload from './dialog/recordDownload.vue'
+
 	export default {
 		name: 'app',
 		components: {
@@ -182,6 +183,7 @@
       this.playerStyle["height"] = this.winHeight + "px";
       this.chooseDate = moment().format('YYYY-MM-DD')
       this.dateChange();
+
 		},
 		destroyed() {
 			this.$destroy('recordVideoPlayer');

+ 99 - 36
web_src/src/components/common/ptzControl.vue

@@ -45,42 +45,65 @@
 
     <div class="control-panel">
 <!--      预置位 -->
-<!--      <el-tabs tab-position="left" style="height: 200px;">-->
-<!--        <el-tab-pane label="预置位">-->
-<!--&lt;!&ndash;          预置位查询 &ndash;&gt;-->
-
-<!--        </el-tab-pane>-->
+      <el-tabs tab-position="left" style="height: 200px;">
+        <el-tab-pane label="预置位">
+<!--          Preset query -->
+          <el-table
+            v-loading="presetLoading"
+            :data="presetList"
+            height="100%"
+            :row-style="{height: 30}"
+            style="width: 100%;font-size: 16px">
+            <el-table-column
+              prop="ind"
+              label="序号"
+              width="90">
+            </el-table-column>
+            <el-table-column
+              prop="remark"
+              label="备注"
+              width="100">
+            </el-table-column>
+            <el-table-column
+              prop="operation">
+
+              <template slot="header" slot-scope="scope">
+                <el-button type="primary" size="mini" icon="el-icon-refresh" @click="queryPresetPos"></el-button>
+              </template>
+            </el-table-column>
+          </el-table>
+        </el-tab-pane>
 <!--        <el-tab-pane label="巡航">配置管理</el-tab-pane>-->
 <!--        <el-tab-pane label="扫描">角色管理</el-tab-pane>-->
-<!--      </el-tabs>-->
-      <el-button-group>
-        <el-tag style="position :absolute; left: 0rem; top: 0rem; width: 5rem; text-align: center" size="medium">预置位编号</el-tag>
-        <el-input-number style="position: absolute; left: 5rem; top: 0rem; width: 6rem" size="mini" v-model="presetPos" controls-position="right" :precision="0" :step="1" :min="1" :max="255"></el-input-number>
-        <el-button style="position: absolute; left: 11rem; top: 0rem; width: 5rem" size="mini" icon="el-icon-add-location" @click="presetPosition(129, presetPos)">设置</el-button>
-        <el-button style="position: absolute; left: 27rem; top: 0rem; width: 5rem" size="mini" type="primary" icon="el-icon-place" @click="presetPosition(130, presetPos)">调用</el-button>
-        <el-button style="position: absolute; left: 16rem; top: 0rem; width: 5rem" size="mini" icon="el-icon-delete-location" @click="presetPosition(131, presetPos)">删除</el-button>
-        <el-tag style="position :absolute; left: 0rem; top: 2.5rem; width: 5rem; text-align: center" size="medium">巡航速度</el-tag>
-        <el-input-number style="position: absolute; left: 5rem; top: 2.5rem; width: 6rem" size="mini" v-model="cruisingSpeed" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>
-        <el-button style="position: absolute; left: 11rem; top: 2.5rem; width: 5rem" size="mini" icon="el-icon-loading" @click="setSpeedOrTime(134, cruisingGroup, cruisingSpeed)">设置</el-button>
-        <el-tag style="position :absolute; left: 16rem; top: 2.5rem; width: 5rem; text-align: center" size="medium">停留时间</el-tag>
-        <el-input-number style="position: absolute; left: 21rem; top: 2.5rem; width: 6rem" size="mini" v-model="cruisingTime" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>
-        <el-button style="position: absolute; left: 27rem; top: 2.5rem; width: 5rem" size="mini" icon="el-icon-timer" @click="setSpeedOrTime(135, cruisingGroup, cruisingTime)">设置</el-button>
-        <el-tag style="position :absolute; left: 0rem; top: 4.5rem; width: 5rem; text-align: center" size="medium">巡航组编号</el-tag>
-        <el-input-number style="position: absolute; left: 5rem; top: 4.5rem; width: 6rem" size="mini" v-model="cruisingGroup" controls-position="right" :precision="0" :min="0" :max="255"></el-input-number>
-        <el-button style="position: absolute; left: 11rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-add-location" @click="setCommand(132, cruisingGroup, presetPos)">添加点</el-button>
-        <el-button style="position: absolute; left: 16rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-delete-location" @click="setCommand(133, cruisingGroup, presetPos)">删除点</el-button>
-        <el-button style="position: absolute; left: 21rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-delete" @click="setCommand(133, cruisingGroup, 0)">删除组</el-button>
-        <el-button style="position: absolute; left: 27rem; top: 5rem; width: 5rem" size="mini" type="primary" icon="el-icon-video-camera-solid" @click="setCommand(136, cruisingGroup, 0)">巡航</el-button>
-        <el-tag style="position :absolute; left: 0rem; top: 7rem; width: 5rem; text-align: center" size="medium">扫描速度</el-tag>
-        <el-input-number style="position: absolute; left: 5rem; top: 7rem; width: 6rem" size="mini" v-model="scanSpeed" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>
-        <el-button style="position: absolute; left: 11rem; top: 7rem; width: 5rem" size="mini" icon="el-icon-loading" @click="setSpeedOrTime(138, scanGroup, scanSpeed)">设置</el-button>
-        <el-tag style="position :absolute; left: 0rem; top: 9rem; width: 5rem; text-align: center" size="medium">扫描组编号</el-tag>
-        <el-input-number style="position: absolute; left: 5rem; top: 9rem; width: 6rem" size="mini" v-model="scanGroup" controls-position="right" :precision="0" :step="1" :min="0" :max="255"></el-input-number>
-        <el-button style="position: absolute; left: 11rem; top: 9rem; width: 5rem" size="mini" icon="el-icon-d-arrow-left" @click="setCommand(137, scanGroup, 1)">左边界</el-button>
-        <el-button style="position: absolute; left: 16rem; top: 9rem; width: 5rem" size="mini" icon="el-icon-d-arrow-right" @click="setCommand(137, scanGroup, 2)">右边界</el-button>
-        <el-button style="position: absolute; left: 27rem; top: 7rem; width: 5rem" size="mini" type="primary" icon="el-icon-video-camera-solid" @click="setCommand(137, scanGroup, 0)">扫描</el-button>
-        <el-button style="position: absolute; left: 27rem; top: 9rem; width: 5rem" size="mini" type="danger" icon="el-icon-switch-button" @click="ptzCamera('stop')">停止</el-button>
-      </el-button-group>
+      </el-tabs>
+<!--      <el-button-group>-->
+<!--        <el-tag style="position :absolute; left: 0rem; top: 0rem; width: 5rem; text-align: center" size="medium">预置位编号</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 5rem; top: 0rem; width: 6rem" size="mini" v-model="presetPos" controls-position="right" :precision="0" :step="1" :min="1" :max="255"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 11rem; top: 0rem; width: 5rem" size="mini" icon="el-icon-add-location" @click="presetPosition(129, presetPos)">设置</el-button>-->
+<!--        <el-button style="position: absolute; left: 27rem; top: 0rem; width: 5rem" size="mini" type="primary" icon="el-icon-place" @click="presetPosition(130, presetPos)">调用</el-button>-->
+<!--        <el-button style="position: absolute; left: 16rem; top: 0rem; width: 5rem" size="mini" icon="el-icon-delete-location" @click="presetPosition(131, presetPos)">删除</el-button>-->
+<!--        <el-tag style="position :absolute; left: 0rem; top: 2.5rem; width: 5rem; text-align: center" size="medium">巡航速度</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 5rem; top: 2.5rem; width: 6rem" size="mini" v-model="cruisingSpeed" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 11rem; top: 2.5rem; width: 5rem" size="mini" icon="el-icon-loading" @click="setSpeedOrTime(134, cruisingGroup, cruisingSpeed)">设置</el-button>-->
+<!--        <el-tag style="position :absolute; left: 16rem; top: 2.5rem; width: 5rem; text-align: center" size="medium">停留时间</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 21rem; top: 2.5rem; width: 6rem" size="mini" v-model="cruisingTime" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 27rem; top: 2.5rem; width: 5rem" size="mini" icon="el-icon-timer" @click="setSpeedOrTime(135, cruisingGroup, cruisingTime)">设置</el-button>-->
+<!--        <el-tag style="position :absolute; left: 0rem; top: 4.5rem; width: 5rem; text-align: center" size="medium">巡航组编号</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 5rem; top: 4.5rem; width: 6rem" size="mini" v-model="cruisingGroup" controls-position="right" :precision="0" :min="0" :max="255"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 11rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-add-location" @click="setCommand(132, cruisingGroup, presetPos)">添加点</el-button>-->
+<!--        <el-button style="position: absolute; left: 16rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-delete-location" @click="setCommand(133, cruisingGroup, presetPos)">删除点</el-button>-->
+<!--        <el-button style="position: absolute; left: 21rem; top: 4.5rem; width: 5rem" size="mini" icon="el-icon-delete" @click="setCommand(133, cruisingGroup, 0)">删除组</el-button>-->
+<!--        <el-button style="position: absolute; left: 27rem; top: 5rem; width: 5rem" size="mini" type="primary" icon="el-icon-video-camera-solid" @click="setCommand(136, cruisingGroup, 0)">巡航</el-button>-->
+<!--        <el-tag style="position :absolute; left: 0rem; top: 7rem; width: 5rem; text-align: center" size="medium">扫描速度</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 5rem; top: 7rem; width: 6rem" size="mini" v-model="scanSpeed" controls-position="right" :precision="0" :min="1" :max="4095"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 11rem; top: 7rem; width: 5rem" size="mini" icon="el-icon-loading" @click="setSpeedOrTime(138, scanGroup, scanSpeed)">设置</el-button>-->
+<!--        <el-tag style="position :absolute; left: 0rem; top: 9rem; width: 5rem; text-align: center" size="medium">扫描组编号</el-tag>-->
+<!--        <el-input-number style="position: absolute; left: 5rem; top: 9rem; width: 6rem" size="mini" v-model="scanGroup" controls-position="right" :precision="0" :step="1" :min="0" :max="255"></el-input-number>-->
+<!--        <el-button style="position: absolute; left: 11rem; top: 9rem; width: 5rem" size="mini" icon="el-icon-d-arrow-left" @click="setCommand(137, scanGroup, 1)">左边界</el-button>-->
+<!--        <el-button style="position: absolute; left: 16rem; top: 9rem; width: 5rem" size="mini" icon="el-icon-d-arrow-right" @click="setCommand(137, scanGroup, 2)">右边界</el-button>-->
+<!--        <el-button style="position: absolute; left: 27rem; top: 7rem; width: 5rem" size="mini" type="primary" icon="el-icon-video-camera-solid" @click="setCommand(137, scanGroup, 0)">扫描</el-button>-->
+<!--        <el-button style="position: absolute; left: 27rem; top: 9rem; width: 5rem" size="mini" type="danger" icon="el-icon-switch-button" @click="ptzCamera('stop')">停止</el-button>-->
+<!--      </el-button-group>-->
     </div>
   </div>
 </template>
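
The new preset table binds v-loading="presetLoading" and :data="presetList", and sendCommand() multiplies step by this.stepValue, but the component's data() block sits outside these hunks. A minimal sketch of the reactive fields the template and handlers rely on, assuming defaults; the actual declarations in ptzControl.vue may differ:

// Sketch only: fields assumed by the new preset-position UI.
// Names come from the template bindings and queryPresetPos();
// the default values here are assumptions.
export default {
  data() {
    return {
      presetLoading: false, // drives v-loading on the preset <el-table>
      presetList: [],       // rows shown in the table: { ind, remark, load }
      stepValue: 1          // multiplier applied to step in sendCommand()
    }
  }
}
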
@@ -346,7 +369,8 @@ export default {
      * @param step
      * @returns {Promise<void>}
      */
-    async sendCommand(command,step=0){
+    async sendCommand(command,step=0)
+    {
       console.log(`[send] ${command} - ${step}`);
       // step = step * 5
       let url = `/api/ptz/c/${this.deviceId}/${this.channelId}/?c=${command}&step=${step*this.stepValue}`
@@ -358,7 +382,10 @@ export default {
       this.clickCount = 0;
       if(err){console.error(err)}
     },
-    async queryPushParam(){
+
+
+    async queryPushParam()
+    {
       let url = `/api/server/pushConfig`
       let [err,res] = await handle(this.$axios.axios({
         method: 'get',
@@ -378,6 +405,42 @@ export default {
         this.$message.warning(res.msg)
       }
 
+    },
+
+    async queryPresetPos(){
+      console.log('请求预置位');
+      this.presetLoading = true;
+      let n_presetLength = 255;
+      let presetList = new Array(n_presetLength).fill(null); // fill() so the .map() below visits every slot
+      let queryUrl = `/api/ptz/preset/query/${this.deviceId}/${this.channelId}`
+      // 加载预置位
+      let [err,res] = await handle(this.$axios.axios({
+        method: 'get',
+        url: queryUrl
+      }));
+      this.presetLoading = false;
+      presetList = presetList.map((item,i)=>{
+        return {
+          ind: i+1,
+          remark: `预置位${i+1}`,
+          load: false,
+        }
+      })
+      if (err){
+        console.error(err)
+        this.presetList = presetList;
+        return this.$message.error(err.message);
+      }
+      console.log(res);
+      let response = res.data;
+      if (response.code === 0){
+
+      }else{
+        this.$message.warning(res.msg)
+      }
+
     }
   },
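
The success branch of queryPresetPos (response.code === 0) is left empty in this hunk, and this.presetList is only assigned on the error path. A minimal sketch of what that branch could do, assuming /api/ptz/preset/query/{deviceId}/{channelId} returns an array of presets; the field names presetId and presetName are assumptions, not a confirmed payload shape:

// Sketch only: fold the queried presets into the 255 default rows built
// earlier in queryPresetPos(). presetId / presetName are assumed field
// names for the response items, not the documented API shape.
if (response.code === 0 && Array.isArray(response.data)) {
  for (const p of response.data) {
    const idx = Number(p.presetId) - 1;        // presets are 1-based in the UI
    if (idx >= 0 && idx < presetList.length) {
      presetList[idx].remark = p.presetName || presetList[idx].remark;
      presetList[idx].load = true;             // mark the slot as configured
    }
  }
  this.presetList = presetList;                // refresh the table rows
}
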