0%

Slf4j日志框架的使用及Logback、Log4j2的整合和性能对比

一、Slf4j

slf4j(Simple Logging Facade for Java)是日志框架的一种抽象(门面),也就是说 slf4j 不能单独使用,必须配合其他日志实现框架。启用 slf4j 需要导入 slf4j-api-xxx.jar,该包只提供统一的日志 API(门面);实际的日志输出由绑定的具体日志实现框架完成,如 log4j、log4j2、logback 等。

编码模式

1
2
3
4
5
6
7
8
9
10
11
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
...
...
private static final Logger logger = LoggerFactory.getLogger(T.class);

logger.trace();
logger.debug();
logger.info();
logger.warn();
logger.error();

注解模式

IDEA需要安装Lombok插件

pom文件引入lombok依赖

1
2
3
4
5
6
7
8
9
10
@Slf4j
public class OauthApp {
...
...
log.trace();
log.debug();
log.info();
log.warn();
log.error();
}

二、Logback

logback-spring.xml配置文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
<?xml version="1.0" encoding="UTF-8"?>
<!-- Logback configuration for Spring Boot (logback-spring.xml).
     Writes to: the console, a Logstash TCP socket (JSON events), and
     rolling JSON files intended for Filebeat pickup. -->
<configuration>
    <include resource="org/springframework/boot/logging/logback/defaults.xml"/>

    <!-- Values resolved from the Spring Environment; defaultValue applies when the property is absent. -->
    <springProperty scope="context" name="springAppName" source="spring.application.name"/>
    <springProperty scope="context" name="ELK_FILEBEAT_PATH" source="elk.filebeat_path" defaultValue="/PATHTO/log"/>
    <springProperty scope="context" name="ELK_URL" source="elk.url" defaultValue="127.0.0.1"/>
    <springProperty scope="context" name="ELK_QUEUE_SIZE" source="elk.queue_size" defaultValue="8192"/>

    <!-- Example for logging into the build folder of your project -->
    <property name="LOG_FILE" value="${BUILD_FOLDER:-build}/${springAppName}"/>

    <!-- You can override this to have a custom pattern -->
    <property name="CONSOLE_LOG_PATTERN"
              value="%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(%X{transNo}){faint} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}"/>

    <!-- Appender to log to console -->
    <appender name="console" class="ch.qos.logback.core.ConsoleAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <!-- Minimum logging level to be presented in the console logs -->
            <level>DEBUG</level>
        </filter>
        <encoder>
            <pattern>${CONSOLE_LOG_PATTERN}</pattern>
            <charset>utf8</charset>
        </encoder>
    </appender>

    <!-- JSON events pushed over TCP to Logstash -->
    <appender name="logstash" class="net.logstash.logback.appender.LogstashTcpSocketAppender">
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>DEBUG</level>
        </filter>
        <destination>${ELK_URL}</destination>
        <queueSize>${ELK_QUEUE_SIZE}</queueSize>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <timestamp>
                    <timeZone>UTC</timeZone>
                </timestamp>
                <pattern>
                    <pattern>
                        {
                        "logdate":"%date{ISO8601}",
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "parent": "%X{X-B3-ParentSpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message",
                        "transNo": "%X{transNo}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <!-- INFO-only rolling file (LevelFilter accepts exactly INFO) -->
    <appender name="FILE_INFO" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>INFO</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- daily rollover, plus size-based rollover within the day -->
            <fileNamePattern>${ELK_FILEBEAT_PATH}/fin-info.%d{yyyy-MM-dd}-%i.log</fileNamePattern>
            <maxFileSize>128MB</maxFileSize>
            <!-- keep 30 days' worth of history capped at 2GB total size -->
            <maxHistory>30</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <pattern>
                    <pattern>
                        {
                        "logdate":"%date{ISO8601}",
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "parent": "%X{X-B3-ParentSpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message",
                        "transNo": "%X{transNo}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <!-- DEBUG-only rolling file (LevelFilter accepts exactly DEBUG) -->
    <appender name="FILE_DEBUG" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>DEBUG</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <rollingPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
            <!-- daily rollover, plus size-based rollover within the day -->
            <fileNamePattern>${ELK_FILEBEAT_PATH}/fin-debug.%d{yyyy-MM-dd}-%i.log</fileNamePattern>
            <maxFileSize>128MB</maxFileSize>
            <!-- keep 30 days' worth of history capped at 2GB total size -->
            <maxHistory>30</maxHistory>
            <totalSizeCap>2GB</totalSizeCap>
        </rollingPolicy>
        <encoder class="net.logstash.logback.encoder.LoggingEventCompositeJsonEncoder">
            <providers>
                <pattern>
                    <pattern>
                        {
                        "logdate":"%date{ISO8601}",
                        "severity": "%level",
                        "service": "${springAppName:-}",
                        "trace": "%X{X-B3-TraceId:-}",
                        "span": "%X{X-B3-SpanId:-}",
                        "parent": "%X{X-B3-ParentSpanId:-}",
                        "exportable": "%X{X-Span-Export:-}",
                        "pid": "${PID:-}",
                        "thread": "%thread",
                        "class": "%logger{40}",
                        "rest": "%message",
                        "transNo": "%X{transNo}"
                        }
                    </pattern>
                </pattern>
            </providers>
        </encoder>
    </appender>

    <!--<appender name="ASYNC" class="ch.qos.logback.classic.AsyncAppender">-->
    <!--<appender-ref ref="FILE" />-->
    <!--</appender>-->

    <root level="info">
        <appender-ref ref="console"/>
        <!-- uncomment this to have also JSON logs -->
        <appender-ref ref="logstash"/>
        <!-- <appender-ref ref="FILE" />-->
        <!--<appender-ref ref="ASYNC"/>-->
    </root>

    <!-- NOTE(review): the original file declared "com.sinosoft" twice (once at INFO
         with FILE_INFO, once at DEBUG with FILE_DEBUG). Logback merges duplicate
         <logger> entries for the same name: the last level wins and appender-refs
         accumulate, so console/logstash were attached twice and every event was
         logged twice. Keep exactly one definition; to capture debug output, change
         level to DEBUG and add FILE_DEBUG here. -->
    <logger name="com.sinosoft" level="INFO" additivity="false">
        <appender-ref ref="console"/>
        <appender-ref ref="logstash"/>
        <appender-ref ref="FILE_INFO"/>
    </logger>
</configuration>

属性配置:

  • elk.filebeat_path
  • elk.url
  • elk.queue_size

三、Log4j2

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
<?xml version="1.0" encoding="UTF-8"?>
<!-- Log level priority: OFF > FATAL > ERROR > WARN > INFO > DEBUG > TRACE > ALL -->
<!-- status="warn": level for Log4j2's own internal status logging (the default is ERROR) -->
<!-- monitorInterval="60": re-check this file every 60 seconds and reconfigure on change -->
<configuration status="warn" monitorInterval="60" strict="true">
    <properties>
        <!-- Constants referenced below via ${name} -->
        <property name="logpath">./logs</property>
        <property name="charset">UTF-8</property>
        <!-- Shared output layout -->
        <property name="pattern">%-d{yyyy-MM-dd HH:mm:ss.SSS}@@%p@@%X{ip}@@%t %C@@%X{requestId} %M %m %n </property>
    </properties>
    <!-- Appenders: output destination, format, and retention policy.
         Common appender types: Console, File, RollingFile. -->
    <appenders>
        <!-- Console output -->
        <Console name="console" target="SYSTEM_OUT">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
        </Console>
        <!-- RollingRandomAccessFile: 20-200% faster than RollingFile per the Log4j2 site -->
        <RollingRandomAccessFile name="YZY.TRACE" immediateFlush="true" bufferSize="1024"
                                 fileName="${logpath}/trace.log"
                                 filePattern="${logpath}/trace.log.%d{yyyy-MM-dd}.gz">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="trace.log.*.gz"/>
                    <IfLastModified age="3d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.SYSTEM" immediateFlush="true" bufferSize="4096"
                                 fileName="${logpath}/system.log"
                                 filePattern="${logpath}/system.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <!-- Reuse the shared layout defined above -->
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <Filters>
                <!-- ThresholdFilter: accept events at or above the given level, deny the rest.
                     Works together with the level on the logger/root: the logger level is
                     checked first, then this filter performs a second cut. -->
                <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY"/>
                <!--<ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY"/>-->
                <!--<ThresholdFilter level="INFO" onMatch="ACCEPT" onMismatch="DENY"/>-->
            </Filters>
            <!-- Policies: when to roll the file over -->
            <Policies>
                <!--<TimeBasedTriggeringPolicy interval="1" modulate="true"/>-->
                <CronTriggeringPolicy schedule="0 0 2 * * ?" evaluateOnStartup="true"/>
            </Policies>
            <!-- Without DefaultRolloverStrategy, at most 7 files are kept per folder by default -->
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="system.log.*.gz"/>
                    <!-- Keep 7 days; older archives are deleted -->
                    <IfLastModified age="7d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.ERROR" immediateFlush="true" bufferSize="4096"
                                 fileName="${logpath}/error.log"
                                 filePattern="${logpath}/error.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <Filters>
                <ThresholdFilter level="ERROR" onMatch="ACCEPT" onMismatch="DENY"/>
            </Filters>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="error.log.*.gz"/>
                    <IfLastModified age="7d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.AUDIT" immediateFlush="false" bufferSize="8192"
                                 fileName="${logpath}/audit.log"
                                 filePattern="${logpath}/audit.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <Filters>
                <ThresholdFilter level="WARN" onMatch="ACCEPT" onMismatch="DENY"/>
            </Filters>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="audit.log.*.gz"/>
                    <IfLastModified age="7d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.POOL" immediateFlush="true" bufferSize="1024"
                                 fileName="${logpath}/pool.log"
                                 filePattern="${logpath}/pool.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="pool.log.*.gz"/>
                    <IfLastModified age="3d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.MONITOR" immediateFlush="true" bufferSize="1024"
                                 fileName="${logpath}/monitor.log"
                                 filePattern="${logpath}/monitor.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <!-- BUGFIX: the original used "pool.log.*" for both the filePattern and
                         this glob, so MONITOR rollover collided with YZY.POOL archives and
                         deleted the wrong files. -->
                    <IfFileName glob="monitor.log.*.gz"/>
                    <IfLastModified age="3d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
        <RollingRandomAccessFile name="YZY.BIZ" immediateFlush="true"
                                 fileName="${logpath}/biz.log"
                                 filePattern="${logpath}/biz.log.%d{yyyy-MM-dd}.gz"
                                 ignoreExceptions="false">
            <PatternLayout pattern="${pattern}" charset="${charset}"/>
            <TimeBasedTriggeringPolicy/>
            <DefaultRolloverStrategy>
                <Delete basePath="${logpath}" maxDepth="2" followLinks="true">
                    <IfFileName glob="biz.log.*.gz"/>
                    <IfLastModified age="7d"/>
                </Delete>
            </DefaultRolloverStrategy>
        </RollingRandomAccessFile>
    </appenders>

    <!-- Loggers: an appender only takes effect once a logger references it -->
    <loggers>
        <!-- additivity="false": events handled here do NOT propagate to parent loggers.
             With true, the Root logger would also receive (and re-output) each event. -->
        <Logger additivity="false" name="YZY.TRACE" level="INFO">
            <AppenderRef ref="YZY.TRACE"/>
        </Logger>
        <Logger additivity="false" name="YZY.SYSTEM" level="INFO">
            <AppenderRef ref="YZY.SYSTEM"/>
            <AppenderRef ref="YZY.ERROR"/>
        </Logger>
        <Logger additivity="false" name="YZY.BIZ" level="INFO">
            <AppenderRef ref="YZY.BIZ"/>
        </Logger>
        <!-- A Logger's name is a package/class prefix, e.g. everything under org.apache at INFO -->
        <Logger additivity="false" name="org.apache" level="INFO">
            <AppenderRef ref="console"/>
        </Logger>
        <Logger additivity="false"
                name="com.alibaba.dubbo.common.threadpool.monitor.MonitorPoolRunnable" level="INFO">
            <AppenderRef ref="YZY.POOL"/>
        </Logger>
        <Logger additivity="false" name="com.alibaba.dubbo.monitor.dubbo.sfextend.SfMonitorExtend"
                level="INFO">
            <AppenderRef ref="YZY.MONITOR"/>
        </Logger>
        <!-- Level for request/response logging; set to ERROR in production -->
        <Logger additivity="true" name="com.alibaba.dubbo.rpc.protocol.rest.support" level="INFO">
            <AppenderRef ref="console"/>
        </Logger>
        <!-- Enable in dev/test to see generated SQL (inherits Root's appenders via additivity) -->
        <Logger additivity="true" name="com.YZY.mapper" level="DEBUG">
        </Logger>
        <!-- Root: fallback logger used whenever no more specific Logger matches -->
        <Root level="DEBUG" includeLocation="true">
            <AppenderRef ref="console"/>
            <AppenderRef ref="YZY.SYSTEM"/>
            <AppenderRef ref="YZY.ERROR"/>
            <AppenderRef ref="YZY.AUDIT"/>
        </Root>
    </loggers>
</configuration>

四、性能对比结果

(性能对比结果图:原文此处为图片,抓取时未保留)

Welcome to my other publishing channels