Large log volume: modifying logback-prod.xml under the corresponding module has no effect either

Blade · Unresolved
Asked by ccc (剑圣) 2021-11-18 09:42

Goal

Limit log output so that it does not fill up the server's disk in a short time.


Some questions

Are the logs shown by docker logs -f all taken from the container's STDOUT?

And the docker logs data gets written to files under the /var/lib/docker/containers/ directory.

Running rmLogs from deploy.sh can clear the space those files take up.
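On that point, rather than relying on rmLogs to wipe those files periodically, the json-file driver can rotate them by itself. A minimal /etc/docker/daemon.json sketch (the 100m / 3 values are placeholders, the Docker daemon has to be restarted, and it only applies to containers created afterwards):

{
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "3"
    }
}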


Is there also a set of log files kept inside the container itself?

In logback-prod.xml I see that the INFO_APPENDER and ERROR_APPENDER sections write log files inside the container.

By default no deletion policy is configured for them, so these logs can also fill up the server's disk.
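For those two appenders, logback's RollingFileAppender can cap the files with a size/time based rolling policy. A minimal sketch of what such a cap could look like (the appender name, path and limits below are placeholders, not the project's actual INFO_APPENDER definition):

<appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <file>logs/${springAppName:-app}/info.log</file>
    <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
        <!-- roll over by day and by size; %i is required when maxFileSize is set -->
        <fileNamePattern>logs/${springAppName:-app}/info.%d{yyyy-MM-dd}.%i.log</fileNamePattern>
        <maxFileSize>100MB</maxFileSize>
        <!-- keep at most 7 days and 1GB of archived logs -->
        <maxHistory>7</maxHistory>
        <totalSizeCap>1GB</totalSizeCap>
    </rollingPolicy>
    <encoder>
        <pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %-5level [%thread] %logger{40} : %m%n</pattern>
        <charset>utf8</charset>
    </encoder>
</appender>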


So to rein in log usage, is the idea that both of these parts need to be limited?

Even when ELK is used, the Docker log files and the log files inside the container will still keep being generated, right?

Actual test

I added a new logback-prod.xml file under the corresponding module, and the runtime environment is prod, but log files are still generated inside the container.

The modified file looks like this:

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="60 seconds">
    <!-- Listener for custom properties -->
    <contextListener class="info.kingdata.libra.core.log.listener.LoggerStartupListener"/>
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>

    <!-- Converter classes required for colored log output -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>
    <!-- Colored log pattern -->
    <property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>
    <!-- Console output -->
    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
        <encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>

    <if condition='property("ELK_MODE").toUpperCase().contains("TRUE")'>
        <then>
            <!-- Push logs to ELK -->
            <appender name="INFO_LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
                <destination>${DESTINATION}</destination>
                <!-- Log output encoding -->
                <encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
                    <providers>
                        <timestamp>
                            <timeZone>UTC</timeZone>
                        </timestamp>
                        <pattern>
                            <pattern>
                                {
                                "traceId": "%X{traceId}",
                                "requestId": "%X{requestId}",
                                "accountId": "%X{accountId}",
                                "tenantId": "%X{tenantId}",
                                "logLevel": "%level",
                                "serviceName": "${springAppName:-SpringApp}",
                                "pid": "${PID:-}",
                                "thread": "%thread",
                                "class": "%logger{40}",
                                "line":"%L",
                                "message": "%message"
                                }
                            </pattern>
                        </pattern>
                        <mdc/>
                        <stackTrace/>
                    </providers>
                </encoder>
                <!-- Log level filter -->
                <filter class="ch.qos.logback.classic.filter.LevelFilter">
                    <level>INFO</level>
                    <onMatch>ACCEPT</onMatch>
                    <onMismatch>DENY</onMismatch>
                </filter>
            </appender>

            <!-- Push logs to ELK -->
            <appender name="ERROR_LOGSTASH" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
                <destination>${DESTINATION}</destination>
                <!-- Log output encoding -->
                <encoder charset="UTF-8" class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
                    <providers>
                        <timestamp>
                            <timeZone>UTC</timeZone>
                        </timestamp>
                        <pattern>
                            <pattern>
                                {
                                "traceId": "%X{traceId}",
                                "requestId": "%X{requestId}",
                                "accountId": "%X{accountId}",
                                "tenantId": "%X{tenantId}",
                                "logLevel": "%level",
                                "serviceName": "${springAppName:-SpringApp}",
                                "pid": "${PID:-}",
                                "thread": "%thread",
                                "class": "%logger{40}",
                                "line":"%L",
                                "message": "%message"
                                }
                            </pattern>
                        </pattern>
                        <mdc/>
                        <stackTrace/>
                    </providers>
                </encoder>
                <!-- Log level filter -->
                <filter class="ch.qos.logback.classic.filter.LevelFilter">
                    <level>ERROR</level>
                    <onMatch>ACCEPT</onMatch>
                    <onMismatch>DENY</onMismatch>
                </filter>
            </appender>
        </then>
    </if>

    <!-- Root log level -->
    <root level="INFO">
        <appender-ref ref="STDOUT"/>
    </root>

    <logger name="net.sf.ehcache" level="INFO"/>
    <logger name="druid.sql" level="INFO"/>

    <!-- Reduce nacos logging -->
    <logger name="com.alibaba.nacos" level="ERROR"/>

</configuration>


1 answer
  • 2021-11-22 22:36

    Your problem is probably the one described here: https://www.jianshu.com/p/6277b38d4114

    Docker's own logs growing too large should have little to do with logback; have a look at the post above and try what it suggests.
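
    If deploy.sh launches the service with docker run, those Docker-side limits can also be set per container (the image name and values below are placeholders):

    docker run -d \
        --log-driver json-file \
        --log-opt max-size=100m \
        --log-opt max-file=3 \
        your-image:tag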
