Polish "Migrate size properties to DataSize"

Closes gh-14549
pull/14569/merge
Stephane Nicoll 6 years ago
parent eb9f635004
commit cbae22f0c9

@ -21,29 +21,27 @@ import java.io.File;
import org.springframework.boot.actuate.system.DiskSpaceHealthIndicator; import org.springframework.boot.actuate.system.DiskSpaceHealthIndicator;
import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.util.Assert; import org.springframework.util.Assert;
import org.springframework.util.unit.DataSize;
/** /**
* External configuration properties for {@link DiskSpaceHealthIndicator}. * External configuration properties for {@link DiskSpaceHealthIndicator}.
* *
* @author Andy Wilkinson * @author Andy Wilkinson
* @author Stephane Nicoll
* @since 1.2.0 * @since 1.2.0
*/ */
@ConfigurationProperties(prefix = "management.health.diskspace") @ConfigurationProperties(prefix = "management.health.diskspace")
public class DiskSpaceHealthIndicatorProperties { public class DiskSpaceHealthIndicatorProperties {
private static final int MEGABYTES = 1024 * 1024;
private static final int DEFAULT_THRESHOLD = 10 * MEGABYTES;
/** /**
* Path used to compute the available disk space. * Path used to compute the available disk space.
*/ */
private File path = new File("."); private File path = new File(".");
/** /**
* Minimum disk space, in bytes, that should be available. * Minimum disk space that should be available.
*/ */
private long threshold = DEFAULT_THRESHOLD; private DataSize threshold = DataSize.ofMegabytes(10);
public File getPath() { public File getPath() {
return this.path; return this.path;
@ -55,12 +53,12 @@ public class DiskSpaceHealthIndicatorProperties {
this.path = path; this.path = path;
} }
public long getThreshold() { public DataSize getThreshold() {
return this.threshold; return this.threshold;
} }
public void setThreshold(long threshold) { public void setThreshold(DataSize threshold) {
Assert.isTrue(threshold >= 0, "threshold must be greater than 0"); Assert.isTrue(!threshold.isNegative(), "threshold must be greater than 0");
this.threshold = threshold; this.threshold = threshold;
} }

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -18,11 +18,13 @@ package org.springframework.boot.actuate.autoconfigure.system;
import org.junit.Test; import org.junit.Test;
import org.springframework.beans.DirectFieldAccessor;
import org.springframework.boot.actuate.autoconfigure.health.HealthIndicatorAutoConfiguration; import org.springframework.boot.actuate.autoconfigure.health.HealthIndicatorAutoConfiguration;
import org.springframework.boot.actuate.health.ApplicationHealthIndicator; import org.springframework.boot.actuate.health.ApplicationHealthIndicator;
import org.springframework.boot.actuate.system.DiskSpaceHealthIndicator; import org.springframework.boot.actuate.system.DiskSpaceHealthIndicator;
import org.springframework.boot.autoconfigure.AutoConfigurations; import org.springframework.boot.autoconfigure.AutoConfigurations;
import org.springframework.boot.test.context.runner.ApplicationContextRunner; import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.util.unit.DataSize;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
@ -30,6 +32,7 @@ import static org.assertj.core.api.Assertions.assertThat;
* Tests for {@link DiskSpaceHealthIndicatorAutoConfiguration}. * Tests for {@link DiskSpaceHealthIndicatorAutoConfiguration}.
* *
* @author Phillip Webb * @author Phillip Webb
* @author Stephane Nicoll
*/ */
public class DiskSpaceHealthIndicatorAutoConfigurationTests { public class DiskSpaceHealthIndicatorAutoConfigurationTests {
@ -45,6 +48,28 @@ public class DiskSpaceHealthIndicatorAutoConfigurationTests {
.doesNotHaveBean(ApplicationHealthIndicator.class)); .doesNotHaveBean(ApplicationHealthIndicator.class));
} }
@Test
public void thresholdMustBePositive() {
this.contextRunner
.withPropertyValues("management.health.diskspace.threshold=-10MB")
.run((context) -> assertThat(context).hasFailed().getFailure()
.hasMessageContaining(
"Failed to bind properties under 'management.health.diskspace'"));
}
@Test
public void thresholdCanBeCustomized() {
this.contextRunner
.withPropertyValues("management.health.diskspace.threshold=20MB")
.run((context) -> {
assertThat(context).hasSingleBean(DiskSpaceHealthIndicator.class);
DirectFieldAccessor dfa = new DirectFieldAccessor(
context.getBean(DiskSpaceHealthIndicator.class));
assertThat(dfa.getPropertyValue("threshold"))
.isEqualTo(DataSize.ofMegabytes(20));
});
}
@Test @Test
public void runWhenDisabledShouldNotCreateIndicator() { public void runWhenDisabledShouldNotCreateIndicator() {
this.contextRunner.withPropertyValues("management.health.diskspace.enabled:false") this.contextRunner.withPropertyValues("management.health.diskspace.enabled:false")

@ -25,6 +25,7 @@ import org.springframework.boot.actuate.health.AbstractHealthIndicator;
import org.springframework.boot.actuate.health.Health; import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator; import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.actuate.health.Status; import org.springframework.boot.actuate.health.Status;
import org.springframework.util.unit.DataSize;
/** /**
* A {@link HealthIndicator} that checks available disk space and reports a status of * A {@link HealthIndicator} that checks available disk space and reports a status of
@ -32,6 +33,7 @@ import org.springframework.boot.actuate.health.Status;
* *
* @author Mattias Severson * @author Mattias Severson
* @author Andy Wilkinson * @author Andy Wilkinson
* @author Stephane Nicoll
* @since 2.0.0 * @since 2.0.0
*/ */
public class DiskSpaceHealthIndicator extends AbstractHealthIndicator { public class DiskSpaceHealthIndicator extends AbstractHealthIndicator {
@ -40,35 +42,47 @@ public class DiskSpaceHealthIndicator extends AbstractHealthIndicator {
private final File path; private final File path;
private final long threshold; private final DataSize threshold;
/** /**
* Create a new {@code DiskSpaceHealthIndicator} instance. * Create a new {@code DiskSpaceHealthIndicator} instance.
* @param path the Path used to compute the available disk space * @param path the Path used to compute the available disk space
* @param threshold the minimum disk space that should be available (in bytes) * @param threshold the minimum disk space that should be available
*/ */
public DiskSpaceHealthIndicator(File path, long threshold) { public DiskSpaceHealthIndicator(File path, DataSize threshold) {
super("DiskSpace health check failed"); super("DiskSpace health check failed");
this.path = path; this.path = path;
this.threshold = threshold; this.threshold = threshold;
} }
/**
* Create a new {@code DiskSpaceHealthIndicator} instance.
* @param path the Path used to compute the available disk space
* @param threshold the minimum disk space that should be available (in bytes)
* @deprecated since 2.1.0 in favour of
* {@link #DiskSpaceHealthIndicator(File, DataSize)}
*/
@Deprecated
public DiskSpaceHealthIndicator(File path, long threshold) {
this(path, DataSize.ofBytes(threshold));
}
@Override @Override
protected void doHealthCheck(Health.Builder builder) throws Exception { protected void doHealthCheck(Health.Builder builder) throws Exception {
long diskFreeInBytes = this.path.getUsableSpace(); long diskFreeInBytes = this.path.getUsableSpace();
if (diskFreeInBytes >= this.threshold) { if (diskFreeInBytes >= this.threshold.toBytes()) {
builder.up(); builder.up();
} }
else { else {
logger.warn(String.format( logger.warn(String.format(
"Free disk space below threshold. " "Free disk space below threshold. "
+ "Available: %d bytes (threshold: %d bytes)", + "Available: %d bytes (threshold: %s)",
diskFreeInBytes, this.threshold)); diskFreeInBytes, this.threshold));
builder.down(); builder.down();
} }
builder.withDetail("total", this.path.getTotalSpace()) builder.withDetail("total", this.path.getTotalSpace())
.withDetail("free", diskFreeInBytes) .withDetail("free", diskFreeInBytes)
.withDetail("threshold", this.threshold); .withDetail("threshold", this.threshold.toBytes());
} }
} }

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -28,6 +28,7 @@ import org.mockito.MockitoAnnotations;
import org.springframework.boot.actuate.health.Health; import org.springframework.boot.actuate.health.Health;
import org.springframework.boot.actuate.health.HealthIndicator; import org.springframework.boot.actuate.health.HealthIndicator;
import org.springframework.boot.actuate.health.Status; import org.springframework.boot.actuate.health.Status;
import org.springframework.util.unit.DataSize;
import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.BDDMockito.given; import static org.mockito.BDDMockito.given;
@ -36,10 +37,13 @@ import static org.mockito.BDDMockito.given;
* Tests for {@link DiskSpaceHealthIndicator}. * Tests for {@link DiskSpaceHealthIndicator}.
* *
* @author Mattias Severson * @author Mattias Severson
* @author Stephane Nicoll
*/ */
public class DiskSpaceHealthIndicatorTests { public class DiskSpaceHealthIndicatorTests {
static final long THRESHOLD_BYTES = 1024; private static final DataSize THRESHOLD = DataSize.ofKilobytes(1);
private static final DataSize TOTAL_SPACE = DataSize.ofKilobytes(10);
@Rule @Rule
public ExpectedException exception = ExpectedException.none(); public ExpectedException exception = ExpectedException.none();
@ -54,30 +58,31 @@ public class DiskSpaceHealthIndicatorTests {
MockitoAnnotations.initMocks(this); MockitoAnnotations.initMocks(this);
given(this.fileMock.exists()).willReturn(true); given(this.fileMock.exists()).willReturn(true);
given(this.fileMock.canRead()).willReturn(true); given(this.fileMock.canRead()).willReturn(true);
this.healthIndicator = new DiskSpaceHealthIndicator(this.fileMock, this.healthIndicator = new DiskSpaceHealthIndicator(this.fileMock, THRESHOLD);
THRESHOLD_BYTES);
} }
@Test @Test
public void diskSpaceIsUp() { public void diskSpaceIsUp() {
given(this.fileMock.getUsableSpace()).willReturn(THRESHOLD_BYTES + 10); long freeSpace = THRESHOLD.toBytes() + 10;
given(this.fileMock.getTotalSpace()).willReturn(THRESHOLD_BYTES * 10); given(this.fileMock.getUsableSpace()).willReturn(freeSpace);
given(this.fileMock.getTotalSpace()).willReturn(TOTAL_SPACE.toBytes());
Health health = this.healthIndicator.health(); Health health = this.healthIndicator.health();
assertThat(health.getStatus()).isEqualTo(Status.UP); assertThat(health.getStatus()).isEqualTo(Status.UP);
assertThat(health.getDetails().get("threshold")).isEqualTo(THRESHOLD_BYTES); assertThat(health.getDetails().get("threshold")).isEqualTo(THRESHOLD.toBytes());
assertThat(health.getDetails().get("free")).isEqualTo(THRESHOLD_BYTES + 10); assertThat(health.getDetails().get("free")).isEqualTo(freeSpace);
assertThat(health.getDetails().get("total")).isEqualTo(THRESHOLD_BYTES * 10); assertThat(health.getDetails().get("total")).isEqualTo(TOTAL_SPACE.toBytes());
} }
@Test @Test
public void diskSpaceIsDown() { public void diskSpaceIsDown() {
given(this.fileMock.getUsableSpace()).willReturn(THRESHOLD_BYTES - 10); long freeSpace = THRESHOLD.toBytes() - 10;
given(this.fileMock.getTotalSpace()).willReturn(THRESHOLD_BYTES * 10); given(this.fileMock.getUsableSpace()).willReturn(freeSpace);
given(this.fileMock.getTotalSpace()).willReturn(TOTAL_SPACE.toBytes());
Health health = this.healthIndicator.health(); Health health = this.healthIndicator.health();
assertThat(health.getStatus()).isEqualTo(Status.DOWN); assertThat(health.getStatus()).isEqualTo(Status.DOWN);
assertThat(health.getDetails().get("threshold")).isEqualTo(THRESHOLD_BYTES); assertThat(health.getDetails().get("threshold")).isEqualTo(THRESHOLD.toBytes());
assertThat(health.getDetails().get("free")).isEqualTo(THRESHOLD_BYTES - 10); assertThat(health.getDetails().get("free")).isEqualTo(freeSpace);
assertThat(health.getDetails().get("total")).isEqualTo(THRESHOLD_BYTES * 10); assertThat(health.getDetails().get("total")).isEqualTo(TOTAL_SPACE.toBytes());
} }
} }

@ -33,12 +33,14 @@ import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.context.properties.DeprecatedConfigurationProperty;
import org.springframework.boot.context.properties.PropertyMapper; import org.springframework.boot.context.properties.PropertyMapper;
import org.springframework.boot.convert.DurationUnit; import org.springframework.boot.convert.DurationUnit;
import org.springframework.core.io.Resource; import org.springframework.core.io.Resource;
import org.springframework.kafka.listener.ContainerProperties.AckMode; import org.springframework.kafka.listener.ContainerProperties.AckMode;
import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer; import org.springframework.kafka.security.jaas.KafkaJaasLoginModuleInitializer;
import org.springframework.util.CollectionUtils; import org.springframework.util.CollectionUtils;
import org.springframework.util.unit.DataSize;
/** /**
* Configuration properties for Spring for Apache Kafka. * Configuration properties for Spring for Apache Kafka.
@ -247,14 +249,14 @@ public class KafkaProperties {
/** /**
* Maximum amount of time the server blocks before answering the fetch request if * Maximum amount of time the server blocks before answering the fetch request if
* there isn't sufficient data to immediately satisfy the requirement given by * there isn't sufficient data to immediately satisfy the requirement given by
* "fetch.min.bytes". * "fetch-min-size".
*/ */
private Duration fetchMaxWait; private Duration fetchMaxWait;
/** /**
* Minimum amount of data, in bytes, the server should return for a fetch request. * Minimum amount of data the server should return for a fetch request.
*/ */
private Integer fetchMinSize; private DataSize fetchMinSize;
/** /**
* Unique string that identifies the consumer group to which this consumer * Unique string that identifies the consumer group to which this consumer
@ -339,11 +341,11 @@ public class KafkaProperties {
this.fetchMaxWait = fetchMaxWait; this.fetchMaxWait = fetchMaxWait;
} }
public Integer getFetchMinSize() { public DataSize getFetchMinSize() {
return this.fetchMinSize; return this.fetchMinSize;
} }
public void setFetchMinSize(Integer fetchMinSize) { public void setFetchMinSize(DataSize fetchMinSize) {
this.fetchMinSize = fetchMinSize; this.fetchMinSize = fetchMinSize;
} }
@ -406,7 +408,7 @@ public class KafkaProperties {
.to(properties.in(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)); .to(properties.in(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
map.from(this::getFetchMaxWait).asInt(Duration::toMillis) map.from(this::getFetchMaxWait).asInt(Duration::toMillis)
.to(properties.in(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG)); .to(properties.in(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG));
map.from(this::getFetchMinSize) map.from(this::getFetchMinSize).asInt(DataSize::toBytes)
.to(properties.in(ConsumerConfig.FETCH_MIN_BYTES_CONFIG)); .to(properties.in(ConsumerConfig.FETCH_MIN_BYTES_CONFIG));
map.from(this::getGroupId).to(properties.in(ConsumerConfig.GROUP_ID_CONFIG)); map.from(this::getGroupId).to(properties.in(ConsumerConfig.GROUP_ID_CONFIG));
map.from(this::getHeartbeatInterval).asInt(Duration::toMillis) map.from(this::getHeartbeatInterval).asInt(Duration::toMillis)
@ -433,10 +435,10 @@ public class KafkaProperties {
private String acks; private String acks;
/** /**
* Default batch size in bytes. A small batch size will make batching less common * Default batch size. A small batch size will make batching less common and may
* and may reduce throughput (a batch size of zero disables batching entirely). * reduce throughput (a batch size of zero disables batching entirely).
*/ */
private Integer batchSize; private DataSize batchSize;
/** /**
* Comma-delimited list of host:port pairs to use for establishing the initial * Comma-delimited list of host:port pairs to use for establishing the initial
@ -445,10 +447,10 @@ public class KafkaProperties {
private List<String> bootstrapServers; private List<String> bootstrapServers;
/** /**
* Total bytes of memory the producer can use to buffer records waiting to be sent * Total memory size the producer can use to buffer records waiting to be sent to
* to the server. * the server.
*/ */
private Long bufferMemory; private DataSize bufferMemory;
/** /**
* ID to pass to the server when making requests. Used for server-side logging. * ID to pass to the server when making requests. Used for server-side logging.
@ -497,11 +499,11 @@ public class KafkaProperties {
this.acks = acks; this.acks = acks;
} }
public Integer getBatchSize() { public DataSize getBatchSize() {
return this.batchSize; return this.batchSize;
} }
public void setBatchSize(Integer batchSize) { public void setBatchSize(DataSize batchSize) {
this.batchSize = batchSize; this.batchSize = batchSize;
} }
@ -513,11 +515,11 @@ public class KafkaProperties {
this.bootstrapServers = bootstrapServers; this.bootstrapServers = bootstrapServers;
} }
public Long getBufferMemory() { public DataSize getBufferMemory() {
return this.bufferMemory; return this.bufferMemory;
} }
public void setBufferMemory(Long bufferMemory) { public void setBufferMemory(DataSize bufferMemory) {
this.bufferMemory = bufferMemory; this.bufferMemory = bufferMemory;
} }
@ -577,11 +579,11 @@ public class KafkaProperties {
Properties properties = new Properties(); Properties properties = new Properties();
PropertyMapper map = PropertyMapper.get().alwaysApplyingWhenNonNull(); PropertyMapper map = PropertyMapper.get().alwaysApplyingWhenNonNull();
map.from(this::getAcks).to(properties.in(ProducerConfig.ACKS_CONFIG)); map.from(this::getAcks).to(properties.in(ProducerConfig.ACKS_CONFIG));
map.from(this::getBatchSize) map.from(this::getBatchSize).asInt(DataSize::toBytes)
.to(properties.in(ProducerConfig.BATCH_SIZE_CONFIG)); .to(properties.in(ProducerConfig.BATCH_SIZE_CONFIG));
map.from(this::getBootstrapServers) map.from(this::getBootstrapServers)
.to(properties.in(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)); .to(properties.in(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG));
map.from(this::getBufferMemory) map.from(this::getBufferMemory).as(DataSize::toBytes)
.to(properties.in(ProducerConfig.BUFFER_MEMORY_CONFIG)); .to(properties.in(ProducerConfig.BUFFER_MEMORY_CONFIG));
map.from(this::getClientId) map.from(this::getClientId)
.to(properties.in(ProducerConfig.CLIENT_ID_CONFIG)); .to(properties.in(ProducerConfig.CLIENT_ID_CONFIG));
@ -674,9 +676,9 @@ public class KafkaProperties {
private List<String> bootstrapServers; private List<String> bootstrapServers;
/** /**
* Maximum number of memory bytes to be used for buffering across all threads. * Maximum memory size to be used for buffering across all threads.
*/ */
private Integer cacheMaxBytesBuffering; private DataSize cacheMaxSizeBuffering;
/** /**
* ID to pass to the server when making requests. Used for server-side logging. * ID to pass to the server when making requests. Used for server-side logging.
@ -727,12 +729,26 @@ public class KafkaProperties {
this.bootstrapServers = bootstrapServers; this.bootstrapServers = bootstrapServers;
} }
@DeprecatedConfigurationProperty(replacement = "spring.kafka.streams.cache-max-size-buffering")
@Deprecated
public Integer getCacheMaxBytesBuffering() { public Integer getCacheMaxBytesBuffering() {
return this.cacheMaxBytesBuffering; return (this.cacheMaxSizeBuffering != null)
? (int) this.cacheMaxSizeBuffering.toBytes() : null;
} }
@Deprecated
public void setCacheMaxBytesBuffering(Integer cacheMaxBytesBuffering) { public void setCacheMaxBytesBuffering(Integer cacheMaxBytesBuffering) {
this.cacheMaxBytesBuffering = cacheMaxBytesBuffering; DataSize cacheMaxSizeBuffering = (cacheMaxBytesBuffering != null)
? DataSize.ofBytes(cacheMaxBytesBuffering) : null;
setCacheMaxSizeBuffering(cacheMaxSizeBuffering);
}
public DataSize getCacheMaxSizeBuffering() {
return this.cacheMaxSizeBuffering;
}
public void setCacheMaxSizeBuffering(DataSize cacheMaxSizeBuffering) {
this.cacheMaxSizeBuffering = cacheMaxSizeBuffering;
} }
public String getClientId() { public String getClientId() {
@ -769,7 +785,7 @@ public class KafkaProperties {
map.from(this::getApplicationId).to(properties.in("application.id")); map.from(this::getApplicationId).to(properties.in("application.id"));
map.from(this::getBootstrapServers) map.from(this::getBootstrapServers)
.to(properties.in(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG)); .to(properties.in(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG));
map.from(this::getCacheMaxBytesBuffering) map.from(this::getCacheMaxSizeBuffering).asInt(DataSize::toBytes)
.to(properties.in("cache.max.bytes.buffering")); .to(properties.in("cache.max.bytes.buffering"));
map.from(this::getClientId) map.from(this::getClientId)
.to(properties.in(CommonClientConfigs.CLIENT_ID_CONFIG)); .to(properties.in(CommonClientConfigs.CLIENT_ID_CONFIG));

@ -134,7 +134,8 @@ public class EmbeddedMongoAutoConfiguration {
if (storage != null) { if (storage != null) {
String databaseDir = storage.getDatabaseDir(); String databaseDir = storage.getDatabaseDir();
String replSetName = storage.getReplSetName(); String replSetName = storage.getReplSetName();
int oplogSize = (storage.getOplogSize() != null) ? storage.getOplogSize() : 0; int oplogSize = (storage.getOplogSize() != null)
? (int) storage.getOplogSize().toMegabytes() : 0;
builder.replication(new Storage(databaseDir, replSetName, oplogSize)); builder.replication(new Storage(databaseDir, replSetName, oplogSize));
} }
Integer configuredPort = this.properties.getPort(); Integer configuredPort = this.properties.getPort();

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -23,6 +23,9 @@ import java.util.Set;
import de.flapdoodle.embed.mongo.distribution.Feature; import de.flapdoodle.embed.mongo.distribution.Feature;
import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.convert.DataSizeUnit;
import org.springframework.util.unit.DataSize;
import org.springframework.util.unit.DataUnit;
/** /**
* Configuration properties for Embedded Mongo. * Configuration properties for Embedded Mongo.
@ -70,9 +73,10 @@ public class EmbeddedMongoProperties {
public static class Storage { public static class Storage {
/** /**
* Maximum size of the oplog, in megabytes. * Maximum size of the oplog.
*/ */
private Integer oplogSize; @DataSizeUnit(DataUnit.MEGABYTES)
private DataSize oplogSize;
/** /**
* Name of the replica set. * Name of the replica set.
@ -84,11 +88,11 @@ public class EmbeddedMongoProperties {
*/ */
private String databaseDir; private String databaseDir;
public Integer getOplogSize() { public DataSize getOplogSize() {
return this.oplogSize; return this.oplogSize;
} }
public void setOplogSize(Integer oplogSize) { public void setOplogSize(DataSize oplogSize) {
this.oplogSize = oplogSize; this.oplogSize = oplogSize;
} }

@ -57,6 +57,7 @@ import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Configuration;
import org.springframework.core.Ordered; import org.springframework.core.Ordered;
import org.springframework.util.MimeType; import org.springframework.util.MimeType;
import org.springframework.util.unit.DataSize;
import org.springframework.web.servlet.resource.ResourceUrlEncodingFilter; import org.springframework.web.servlet.resource.ResourceUrlEncodingFilter;
/** /**
@ -283,8 +284,8 @@ public class ThymeleafAutoConfiguration {
PropertyMapper map = PropertyMapper.get(); PropertyMapper map = PropertyMapper.get();
map.from(properties::getMediaTypes).whenNonNull() map.from(properties::getMediaTypes).whenNonNull()
.to(resolver::setSupportedMediaTypes); .to(resolver::setSupportedMediaTypes);
map.from(properties::getMaxChunkSize).when((size) -> size > 0) map.from(properties::getMaxChunkSize).asInt(DataSize::toBytes)
.to(resolver::setResponseMaxChunkSizeBytes); .when((size) -> size > 0).to(resolver::setResponseMaxChunkSizeBytes);
map.from(properties::getFullModeViewNames).to(resolver::setFullModeViewNames); map.from(properties::getFullModeViewNames).to(resolver::setFullModeViewNames);
map.from(properties::getChunkedModeViewNames) map.from(properties::getChunkedModeViewNames)
.to(resolver::setChunkedModeViewNames); .to(resolver::setChunkedModeViewNames);

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -23,6 +23,7 @@ import java.util.List;
import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.http.MediaType; import org.springframework.http.MediaType;
import org.springframework.util.MimeType; import org.springframework.util.MimeType;
import org.springframework.util.unit.DataSize;
/** /**
* Properties for Thymeleaf. * Properties for Thymeleaf.
@ -233,10 +234,10 @@ public class ThymeleafProperties {
public static class Reactive { public static class Reactive {
/** /**
* Maximum size of data buffers used for writing to the response, in bytes. * Maximum size of data buffers used for writing to the response. Templates will
* Templates will execute in CHUNKED mode by default if this is set. * execute in CHUNKED mode by default if this is set.
*/ */
private int maxChunkSize; private DataSize maxChunkSize = DataSize.ofBytes(0);
/** /**
* Media types supported by the view technology. * Media types supported by the view technology.
@ -263,11 +264,11 @@ public class ThymeleafProperties {
this.mediaTypes = mediaTypes; this.mediaTypes = mediaTypes;
} }
public int getMaxChunkSize() { public DataSize getMaxChunkSize() {
return this.maxChunkSize; return this.maxChunkSize;
} }
public void setMaxChunkSize(int maxChunkSize) { public void setMaxChunkSize(DataSize maxChunkSize) {
this.maxChunkSize = maxChunkSize; this.maxChunkSize = maxChunkSize;
} }

@ -946,7 +946,7 @@ public class ServerProperties {
* Size of each buffer. The default is derived from the maximum amount of memory * Size of each buffer. The default is derived from the maximum amount of memory
* that is available to the JVM. * that is available to the JVM.
*/ */
private DataSize bufferSize = DataSize.ofBytes(0); private DataSize bufferSize;
/** /**
* Number of I/O threads to create for the worker. The default is derived from the * Number of I/O threads to create for the worker. The default is derived from the

@ -78,8 +78,8 @@ public class JettyWebServerFactoryCustomizer implements
.asInt(DataSize::toBytes) .asInt(DataSize::toBytes)
.to((maxHttpHeaderSize) -> customizeMaxHttpHeaderSize(factory, .to((maxHttpHeaderSize) -> customizeMaxHttpHeaderSize(factory,
maxHttpHeaderSize)); maxHttpHeaderSize));
propertyMapper.from(jettyProperties::getMaxHttpPostSize).whenNonNull() propertyMapper.from(jettyProperties::getMaxHttpPostSize).asInt(DataSize::toBytes)
.asInt(DataSize::toBytes) .when(this::isPositive)
.to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory, .to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory,
maxHttpPostSize)); maxHttpPostSize));
propertyMapper.from(properties::getConnectionTimeout).whenNonNull() propertyMapper.from(properties::getConnectionTimeout).whenNonNull()

@ -92,8 +92,8 @@ public class TomcatWebServerFactoryCustomizer implements
propertyMapper.from(tomcatProperties::getMaxSwallowSize).whenNonNull() propertyMapper.from(tomcatProperties::getMaxSwallowSize).whenNonNull()
.asInt(DataSize::toBytes) .asInt(DataSize::toBytes)
.to((maxSwallowSize) -> customizeMaxSwallowSize(factory, maxSwallowSize)); .to((maxSwallowSize) -> customizeMaxSwallowSize(factory, maxSwallowSize));
propertyMapper.from(tomcatProperties::getMaxHttpPostSize).whenNonNull() propertyMapper.from(tomcatProperties::getMaxHttpPostSize).asInt(DataSize::toBytes)
.asInt(DataSize::toBytes) .when((maxHttpPostSize) -> maxHttpPostSize != 0)
.to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory, .to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory,
maxHttpPostSize)); maxHttpPostSize));
propertyMapper.from(tomcatProperties::getAccesslog) propertyMapper.from(tomcatProperties::getAccesslog)

@ -89,8 +89,8 @@ public class UndertowWebServerFactoryCustomizer implements
.asInt(DataSize::toBytes) .asInt(DataSize::toBytes)
.to((maxHttpHeaderSize) -> customizeMaxHttpHeaderSize(factory, .to((maxHttpHeaderSize) -> customizeMaxHttpHeaderSize(factory,
maxHttpHeaderSize)); maxHttpHeaderSize));
propertyMapper.from(undertowProperties::getMaxHttpPostSize).whenNonNull() propertyMapper.from(undertowProperties::getMaxHttpPostSize)
.asInt(DataSize::toBytes) .asInt(DataSize::toBytes).when(this::isPositive)
.to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory, .to((maxHttpPostSize) -> customizeMaxHttpPostSize(factory,
maxHttpPostSize)); maxHttpPostSize));
propertyMapper.from(properties::getConnectionTimeout) propertyMapper.from(properties::getConnectionTimeout)

@ -29,6 +29,12 @@
"level": "error" "level": "error"
} }
}, },
{
"name": "server.compression.min-response-size",
"description": "Minimum \"Content-Length\" value that is required for compression to be performed.",
"type": "org.springframework.util.unit.DataSize",
"defaultValue": "2KB"
},
{ {
"name": "server.error.include-stacktrace", "name": "server.error.include-stacktrace",
"defaultValue": "never" "defaultValue": "never"

@ -100,7 +100,7 @@ public class KafkaAutoConfigurationTests {
"spring.kafka.consumer.enable-auto-commit=false", "spring.kafka.consumer.enable-auto-commit=false",
"spring.kafka.consumer.fetch-max-wait=456", "spring.kafka.consumer.fetch-max-wait=456",
"spring.kafka.consumer.properties.fiz.buz=fix.fox", "spring.kafka.consumer.properties.fiz.buz=fix.fox",
"spring.kafka.consumer.fetch-min-size=789", "spring.kafka.consumer.fetch-min-size=1KB",
"spring.kafka.consumer.group-id=bar", "spring.kafka.consumer.group-id=bar",
"spring.kafka.consumer.heartbeat-interval=234", "spring.kafka.consumer.heartbeat-interval=234",
"spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.LongDeserializer", "spring.kafka.consumer.key-deserializer = org.apache.kafka.common.serialization.LongDeserializer",
@ -143,7 +143,7 @@ public class KafkaAutoConfigurationTests {
assertThat(configs.get(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG)) assertThat(configs.get(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG))
.isEqualTo(456); .isEqualTo(456);
assertThat(configs.get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG)) assertThat(configs.get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG))
.isEqualTo(789); .isEqualTo(1024);
assertThat(configs.get(ConsumerConfig.GROUP_ID_CONFIG)) assertThat(configs.get(ConsumerConfig.GROUP_ID_CONFIG))
.isEqualTo("bar"); .isEqualTo("bar");
assertThat(configs.get(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG)) assertThat(configs.get(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG))
@ -166,10 +166,10 @@ public class KafkaAutoConfigurationTests {
public void producerProperties() { public void producerProperties() {
this.contextRunner.withPropertyValues("spring.kafka.clientId=cid", this.contextRunner.withPropertyValues("spring.kafka.clientId=cid",
"spring.kafka.properties.foo.bar.baz=qux.fiz.buz", "spring.kafka.properties.foo.bar.baz=qux.fiz.buz",
"spring.kafka.producer.acks=all", "spring.kafka.producer.batch-size=20", "spring.kafka.producer.acks=all", "spring.kafka.producer.batch-size=2KB",
"spring.kafka.producer.bootstrap-servers=bar:1234", // test "spring.kafka.producer.bootstrap-servers=bar:1234", // test
// override // override
"spring.kafka.producer.buffer-memory=12345", "spring.kafka.producer.buffer-memory=4KB",
"spring.kafka.producer.compression-type=gzip", "spring.kafka.producer.compression-type=gzip",
"spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.LongSerializer", "spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.LongSerializer",
"spring.kafka.producer.retries=2", "spring.kafka.producer.retries=2",
@ -194,11 +194,11 @@ public class KafkaAutoConfigurationTests {
// producer // producer
assertThat(configs.get(ProducerConfig.ACKS_CONFIG)).isEqualTo("all"); assertThat(configs.get(ProducerConfig.ACKS_CONFIG)).isEqualTo("all");
assertThat(configs.get(ProducerConfig.BATCH_SIZE_CONFIG)) assertThat(configs.get(ProducerConfig.BATCH_SIZE_CONFIG))
.isEqualTo(20); .isEqualTo(2048);
assertThat(configs.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) assertThat(configs.get(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG))
.isEqualTo(Collections.singletonList("bar:1234")); // override .isEqualTo(Collections.singletonList("bar:1234")); // override
assertThat(configs.get(ProducerConfig.BUFFER_MEMORY_CONFIG)) assertThat(configs.get(ProducerConfig.BUFFER_MEMORY_CONFIG))
.isEqualTo(12345L); .isEqualTo(4096L);
assertThat(configs.get(ProducerConfig.COMPRESSION_TYPE_CONFIG)) assertThat(configs.get(ProducerConfig.COMPRESSION_TYPE_CONFIG))
.isEqualTo("gzip"); .isEqualTo("gzip");
assertThat(configs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) assertThat(configs.get(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG))
@ -290,7 +290,7 @@ public class KafkaAutoConfigurationTests {
"spring.application.name=appName", "spring.application.name=appName",
"spring.kafka.properties.foo.bar.baz=qux.fiz.buz", "spring.kafka.properties.foo.bar.baz=qux.fiz.buz",
"spring.kafka.streams.auto-startup=false", "spring.kafka.streams.auto-startup=false",
"spring.kafka.streams.cache-max-bytes-buffering=42", "spring.kafka.streams.cache-max-size-buffering=1KB",
"spring.kafka.streams.client-id=override", "spring.kafka.streams.client-id=override",
"spring.kafka.streams.properties.fiz.buz=fix.fox", "spring.kafka.streams.properties.fiz.buz=fix.fox",
"spring.kafka.streams.replication-factor=2", "spring.kafka.streams.replication-factor=2",
@ -311,7 +311,7 @@ public class KafkaAutoConfigurationTests {
.isEqualTo("localhost:9092, localhost:9093"); .isEqualTo("localhost:9092, localhost:9093");
assertThat( assertThat(
configs.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG)) configs.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG))
.isEqualTo("42"); .isEqualTo("1024");
assertThat(configs.get(StreamsConfig.CLIENT_ID_CONFIG)) assertThat(configs.get(StreamsConfig.CLIENT_ID_CONFIG))
.isEqualTo("override"); .isEqualTo("override");
assertThat(configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG)) assertThat(configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG))
@ -347,6 +347,22 @@ public class KafkaAutoConfigurationTests {
}); });
} }
// Backward-compatibility check: the deprecated
// 'spring.kafka.streams.cache-max-bytes-buffering' property must still be
// honored, with its raw value ("42") reaching the Kafka Streams
// CACHE_MAX_BYTES_BUFFERING_CONFIG unchanged. Elsewhere in this commit the
// property is superseded by the DataSize-based
// 'spring.kafka.streams.cache-max-size-buffering', hence the @Deprecated
// marker on this test.
@Test
@Deprecated
public void streamPropertiesWithCustomCacheMaxBytesBuffering() {
this.contextRunner.withUserConfiguration(EnableKafkaStreamsConfiguration.class)
.withPropertyValues("spring.application.name=appName",
"spring.kafka.streams.cache-max-bytes-buffering=42")
.run((context) -> {
// Resolved streams settings are exposed as java.util.Properties via the
// default streams-config bean registered by the auto-configuration.
Properties configs = context.getBean(
KafkaStreamsDefaultConfiguration.DEFAULT_STREAMS_CONFIG_BEAN_NAME,
KafkaStreamsConfiguration.class).asProperties();
// Kafka stores the setting as a String, so compare against "42", not 42.
assertThat(
configs.get(StreamsConfig.CACHE_MAX_BYTES_BUFFERING_CONFIG))
.isEqualTo("42");
});
}
@Test @Test
public void streamsApplicationIdUsesMainApplicationNameByDefault() { public void streamsApplicationIdUsesMainApplicationNameByDefault() {
this.contextRunner.withUserConfiguration(EnableKafkaStreamsConfiguration.class) this.contextRunner.withUserConfiguration(EnableKafkaStreamsConfiguration.class)

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -139,6 +139,13 @@ public class EmbeddedMongoAutoConfigurationTests {
@Test @Test
public void customOpLogSizeIsAppliedToConfiguration() { public void customOpLogSizeIsAppliedToConfiguration() {
load("spring.mongodb.embedded.storage.oplogSize=1024KB");
assertThat(this.context.getBean(IMongodConfig.class).replication().getOplogSize())
.isEqualTo(1);
}
@Test
public void customOpLogSizeUsesMegabytesPerDefault() {
load("spring.mongodb.embedded.storage.oplogSize=10"); load("spring.mongodb.embedded.storage.oplogSize=10");
assertThat(this.context.getBean(IMongodConfig.class).replication().getOplogSize()) assertThat(this.context.getBean(IMongodConfig.class).replication().getOplogSize())
.isEqualTo(10); .isEqualTo(10);

@ -1,5 +1,5 @@
/* /*
* Copyright 2012-2017 the original author or authors. * Copyright 2012-2018 the original author or authors.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.
@ -116,7 +116,7 @@ public class ThymeleafReactiveAutoConfigurationTests {
@Test @Test
public void overrideMaxChunkSize() { public void overrideMaxChunkSize() {
load(BaseConfiguration.class, "spring.thymeleaf.reactive.maxChunkSize:8192"); load(BaseConfiguration.class, "spring.thymeleaf.reactive.maxChunkSize:8KB");
ThymeleafReactiveViewResolver views = this.context ThymeleafReactiveViewResolver views = this.context
.getBean(ThymeleafReactiveViewResolver.class); .getBean(ThymeleafReactiveViewResolver.class);
assertThat(views.getResponseMaxChunkSizeBytes()).isEqualTo(Integer.valueOf(8192)); assertThat(views.getResponseMaxChunkSizeBytes()).isEqualTo(Integer.valueOf(8192));

@ -183,7 +183,7 @@ content into your application. Rather, pick only the properties that you need.
server.compression.enabled=false # Whether response compression is enabled. server.compression.enabled=false # Whether response compression is enabled.
server.compression.excluded-user-agents= # List of user-agents to exclude from compression. server.compression.excluded-user-agents= # List of user-agents to exclude from compression.
server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml # Comma-separated list of MIME types that should be compressed. server.compression.mime-types=text/html,text/xml,text/plain,text/css,text/javascript,application/javascript,application/json,application/xml # Comma-separated list of MIME types that should be compressed.
server.compression.min-response-size=2048 # Minimum "Content-Length" value that is required for compression to be performed. server.compression.min-response-size=2KB # Minimum "Content-Length" value that is required for compression to be performed.
server.connection-timeout= # Time that connectors wait for another HTTP request before closing the connection. When not set, the connector's container-specific default is used. Use a value of -1 to indicate no (that is, an infinite) timeout. server.connection-timeout= # Time that connectors wait for another HTTP request before closing the connection. When not set, the connector's container-specific default is used. Use a value of -1 to indicate no (that is, an infinite) timeout.
server.error.include-exception=false # Include the "exception" attribute. server.error.include-exception=false # Include the "exception" attribute.
server.error.include-stacktrace=never # When to include a "stacktrace" attribute. server.error.include-stacktrace=never # When to include a "stacktrace" attribute.
@ -203,7 +203,7 @@ content into your application. Rather, pick only the properties that you need.
server.jetty.accesslog.log-server=false # Enable logging of the request hostname. server.jetty.accesslog.log-server=false # Enable logging of the request hostname.
server.jetty.accesslog.retention-period=31 # Number of days before rotated log files are deleted. server.jetty.accesslog.retention-period=31 # Number of days before rotated log files are deleted.
server.jetty.accesslog.time-zone=GMT # Timezone of the request log. server.jetty.accesslog.time-zone=GMT # Timezone of the request log.
server.jetty.max-http-post-size=200000 # Maximum size, in bytes, of the HTTP post or put content. server.jetty.max-http-post-size=200000B # Maximum size of the HTTP post or put content.
server.jetty.selectors=-1 # Number of selector threads to use. When the value is -1, the default, the number of selectors is derived from the operating environment. server.jetty.selectors=-1 # Number of selector threads to use. When the value is -1, the default, the number of selectors is derived from the operating environment.
server.max-http-header-size=8KB # Maximum size of the HTTP message header. server.max-http-header-size=8KB # Maximum size of the HTTP message header.
server.port=8080 # Server HTTP port. server.port=8080 # Server HTTP port.
@ -282,11 +282,11 @@ content into your application. Rather, pick only the properties that you need.
server.undertow.accesslog.prefix=access_log. # Log file name prefix. server.undertow.accesslog.prefix=access_log. # Log file name prefix.
server.undertow.accesslog.rotate=true # Whether to enable access log rotation. server.undertow.accesslog.rotate=true # Whether to enable access log rotation.
server.undertow.accesslog.suffix=log # Log file name suffix. server.undertow.accesslog.suffix=log # Log file name suffix.
server.undertow.buffer-size=0 # Size of each buffer, in bytes. server.undertow.buffer-size= # Size of each buffer.
server.undertow.direct-buffers= # Allocate buffers outside the Java heap. The default is derived from the maximum amount of memory that is available to the JVM. server.undertow.direct-buffers= # Allocate buffers outside the Java heap. The default is derived from the maximum amount of memory that is available to the JVM.
server.undertow.eager-filter-init=true # Whether servlet filters should be initialized on startup. server.undertow.eager-filter-init=true # Whether servlet filters should be initialized on startup.
server.undertow.io-threads= # Number of I/O threads to create for the worker. The default is derived from the number of available processors. server.undertow.io-threads= # Number of I/O threads to create for the worker. The default is derived from the number of available processors.
server.undertow.max-http-post-size=-1 # Maximum size in bytes of the HTTP post content. When the value is -1, the default, the size is unlimited. server.undertow.max-http-post-size=-1B # Maximum size of the HTTP post content. When the value is -1, the default, the size is unlimited.
server.undertow.worker-threads= # Number of worker threads. The default is 8 times the number of I/O threads. server.undertow.worker-threads= # Number of worker threads. The default is 8 times the number of I/O threads.
# FREEMARKER ({sc-spring-boot-autoconfigure}/freemarker/FreeMarkerProperties.{sc-ext}[FreeMarkerProperties]) # FREEMARKER ({sc-spring-boot-autoconfigure}/freemarker/FreeMarkerProperties.{sc-ext}[FreeMarkerProperties])
@ -501,7 +501,7 @@ content into your application. Rather, pick only the properties that you need.
spring.thymeleaf.prefix=classpath:/templates/ # Prefix that gets prepended to view names when building a URL. spring.thymeleaf.prefix=classpath:/templates/ # Prefix that gets prepended to view names when building a URL.
spring.thymeleaf.reactive.chunked-mode-view-names= # Comma-separated list of view names (patterns allowed) that should be the only ones executed in CHUNKED mode when a max chunk size is set. spring.thymeleaf.reactive.chunked-mode-view-names= # Comma-separated list of view names (patterns allowed) that should be the only ones executed in CHUNKED mode when a max chunk size is set.
spring.thymeleaf.reactive.full-mode-view-names= # Comma-separated list of view names (patterns allowed) that should be executed in FULL mode even if a max chunk size is set. spring.thymeleaf.reactive.full-mode-view-names= # Comma-separated list of view names (patterns allowed) that should be executed in FULL mode even if a max chunk size is set.
spring.thymeleaf.reactive.max-chunk-size=0 # Maximum size of data buffers used for writing to the response, in bytes. spring.thymeleaf.reactive.max-chunk-size=0B # Maximum size of data buffers used for writing to the response.
spring.thymeleaf.reactive.media-types= # Media types supported by the view technology. spring.thymeleaf.reactive.media-types= # Media types supported by the view technology.
spring.thymeleaf.servlet.content-type=text/html # Content-Type value written to HTTP responses. spring.thymeleaf.servlet.content-type=text/html # Content-Type value written to HTTP responses.
spring.thymeleaf.suffix=.html # Suffix that gets appended to view names when building a URL. spring.thymeleaf.suffix=.html # Suffix that gets appended to view names when building a URL.
@ -920,7 +920,7 @@ content into your application. Rather, pick only the properties that you need.
# EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}/mongo/embedded/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties]) # EMBEDDED MONGODB ({sc-spring-boot-autoconfigure}/mongo/embedded/EmbeddedMongoProperties.{sc-ext}[EmbeddedMongoProperties])
spring.mongodb.embedded.features=sync_delay # Comma-separated list of features to enable. spring.mongodb.embedded.features=sync_delay # Comma-separated list of features to enable.
spring.mongodb.embedded.storage.database-dir= # Directory used for data storage. spring.mongodb.embedded.storage.database-dir= # Directory used for data storage.
spring.mongodb.embedded.storage.oplog-size= # Maximum size of the oplog, in megabytes. spring.mongodb.embedded.storage.oplog-size= # Maximum size of the oplog.
spring.mongodb.embedded.storage.repl-set-name= # Name of the replica set. spring.mongodb.embedded.storage.repl-set-name= # Name of the replica set.
spring.mongodb.embedded.version=3.2.2 # Version of Mongo to use. spring.mongodb.embedded.version=3.2.2 # Version of Mongo to use.
@ -1046,8 +1046,8 @@ content into your application. Rather, pick only the properties that you need.
spring.kafka.consumer.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for consumers. spring.kafka.consumer.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for consumers.
spring.kafka.consumer.client-id= # ID to pass to the server when making requests. Used for server-side logging. spring.kafka.consumer.client-id= # ID to pass to the server when making requests. Used for server-side logging.
spring.kafka.consumer.enable-auto-commit= # Whether the consumer's offset is periodically committed in the background. spring.kafka.consumer.enable-auto-commit= # Whether the consumer's offset is periodically committed in the background.
spring.kafka.consumer.fetch-max-wait= # Maximum amount of time the server blocks before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by "fetch.min.bytes". spring.kafka.consumer.fetch-max-wait= # Maximum amount of time the server blocks before answering the fetch request if there isn't sufficient data to immediately satisfy the requirement given by "fetch-min-size".
spring.kafka.consumer.fetch-min-size= # Minimum amount of data, in bytes, the server should return for a fetch request. spring.kafka.consumer.fetch-min-size= # Minimum amount of data the server should return for a fetch request.
spring.kafka.consumer.group-id= # Unique string that identifies the consumer group to which this consumer belongs. spring.kafka.consumer.group-id= # Unique string that identifies the consumer group to which this consumer belongs.
spring.kafka.consumer.heartbeat-interval= # Expected time between heartbeats to the consumer coordinator. spring.kafka.consumer.heartbeat-interval= # Expected time between heartbeats to the consumer coordinator.
spring.kafka.consumer.key-deserializer= # Deserializer class for keys. spring.kafka.consumer.key-deserializer= # Deserializer class for keys.
@ -1078,9 +1078,9 @@ content into your application. Rather, pick only the properties that you need.
spring.kafka.listener.poll-timeout= # Timeout to use when polling the consumer. spring.kafka.listener.poll-timeout= # Timeout to use when polling the consumer.
spring.kafka.listener.type=single # Listener type. spring.kafka.listener.type=single # Listener type.
spring.kafka.producer.acks= # Number of acknowledgments the producer requires the leader to have received before considering a request complete. spring.kafka.producer.acks= # Number of acknowledgments the producer requires the leader to have received before considering a request complete.
spring.kafka.producer.batch-size= # Default batch size in bytes. spring.kafka.producer.batch-size= # Default batch size.
spring.kafka.producer.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for producers. spring.kafka.producer.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for producers.
spring.kafka.producer.buffer-memory= # Total bytes of memory the producer can use to buffer records waiting to be sent to the server. spring.kafka.producer.buffer-memory= # Total memory size the producer can use to buffer records waiting to be sent to the server.
spring.kafka.producer.client-id= # ID to pass to the server when making requests. Used for server-side logging. spring.kafka.producer.client-id= # ID to pass to the server when making requests. Used for server-side logging.
spring.kafka.producer.compression-type= # Compression type for all data generated by the producer. spring.kafka.producer.compression-type= # Compression type for all data generated by the producer.
spring.kafka.producer.key-serializer= # Serializer class for keys. spring.kafka.producer.key-serializer= # Serializer class for keys.
@ -1108,7 +1108,7 @@ content into your application. Rather, pick only the properties that you need.
spring.kafka.streams.application-id= # Kafka streams application.id property; default spring.application.name. spring.kafka.streams.application-id= # Kafka streams application.id property; default spring.application.name.
spring.kafka.streams.auto-startup=true # Whether or not to auto-start the streams factory bean. spring.kafka.streams.auto-startup=true # Whether or not to auto-start the streams factory bean.
spring.kafka.streams.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for streams. spring.kafka.streams.bootstrap-servers= # Comma-delimited list of host:port pairs to use for establishing the initial connections to the Kafka cluster. Overrides the global property, for streams.
spring.kafka.streams.cache-max-bytes-buffering= # Maximum number of memory bytes to be used for buffering across all threads. spring.kafka.streams.cache-max-size-buffering= # Maximum memory size to be used for buffering across all threads.
spring.kafka.streams.client-id= # ID to pass to the server when making requests. Used for server-side logging. spring.kafka.streams.client-id= # ID to pass to the server when making requests. Used for server-side logging.
spring.kafka.streams.properties.*= # Additional Kafka properties used to configure the streams. spring.kafka.streams.properties.*= # Additional Kafka properties used to configure the streams.
spring.kafka.streams.replication-factor= # The replication factor for change log topics and repartition topics created by the stream processing application. spring.kafka.streams.replication-factor= # The replication factor for change log topics and repartition topics created by the stream processing application.
@ -1347,7 +1347,7 @@ content into your application. Rather, pick only the properties that you need.
management.health.defaults.enabled=true # Whether to enable default health indicators. management.health.defaults.enabled=true # Whether to enable default health indicators.
management.health.diskspace.enabled=true # Whether to enable disk space health check. management.health.diskspace.enabled=true # Whether to enable disk space health check.
management.health.diskspace.path= # Path used to compute the available disk space. management.health.diskspace.path= # Path used to compute the available disk space.
management.health.diskspace.threshold=0 # Minimum disk space, in bytes, that should be available. management.health.diskspace.threshold=10MB # Minimum disk space that should be available.
management.health.elasticsearch.enabled=true # Whether to enable Elasticsearch health check. management.health.elasticsearch.enabled=true # Whether to enable Elasticsearch health check.
management.health.elasticsearch.indices= # Comma-separated index names. management.health.elasticsearch.indices= # Comma-separated index names.
management.health.elasticsearch.response-timeout=100ms # Time to wait for a response from the cluster. management.health.elasticsearch.response-timeout=100ms # Time to wait for a response from the cluster.

@ -41,7 +41,7 @@ final class JettyHandlerWrappers {
static HandlerWrapper createGzipHandlerWrapper(Compression compression) { static HandlerWrapper createGzipHandlerWrapper(Compression compression) {
GzipHandler handler = new GzipHandler(); GzipHandler handler = new GzipHandler();
handler.setMinGzipSize(compression.getMinResponseSize()); handler.setMinGzipSize((int) compression.getMinResponseSize().toBytes());
handler.setIncludedMimeTypes(compression.getMimeTypes()); handler.setIncludedMimeTypes(compression.getMimeTypes());
for (HttpMethod httpMethod : HttpMethod.values()) { for (HttpMethod httpMethod : HttpMethod.values()) {
handler.addIncludedMethods(httpMethod.name()); handler.addIncludedMethods(httpMethod.name());

@ -51,8 +51,9 @@ final class CompressionCustomizer implements NettyServerCustomizer {
@Override @Override
public HttpServer apply(HttpServer server) { public HttpServer apply(HttpServer server) {
if (this.compression.getMinResponseSize() >= 0) { if (!this.compression.getMinResponseSize().isNegative()) {
server = server.compress(this.compression.getMinResponseSize()); server = server
.compress((int) this.compression.getMinResponseSize().toBytes());
} }
CompressionPredicate mimeTypes = getMimeTypesPredicate( CompressionPredicate mimeTypes = getMimeTypesPredicate(
this.compression.getMimeTypes()); this.compression.getMimeTypes());

@ -50,7 +50,7 @@ class CompressionConnectorCustomizer implements TomcatConnectorCustomizer {
private void customize(AbstractHttp11Protocol<?> protocol) { private void customize(AbstractHttp11Protocol<?> protocol) {
Compression compression = this.compression; Compression compression = this.compression;
protocol.setCompression("on"); protocol.setCompression("on");
protocol.setCompressionMinSize(compression.getMinResponseSize()); protocol.setCompressionMinSize((int) compression.getMinResponseSize().toBytes());
protocol.setCompressibleMimeType( protocol.setCompressibleMimeType(
StringUtils.arrayToCommaDelimitedString(compression.getMimeTypes())); StringUtils.arrayToCommaDelimitedString(compression.getMimeTypes()));
if (this.compression.getExcludedUserAgents() != null) { if (this.compression.getExcludedUserAgents() != null) {

@ -65,7 +65,8 @@ final class UndertowCompressionConfigurer {
private static Predicate[] getCompressionPredicates(Compression compression) { private static Predicate[] getCompressionPredicates(Compression compression) {
List<Predicate> predicates = new ArrayList<>(); List<Predicate> predicates = new ArrayList<>();
predicates.add(new MaxSizePredicate(compression.getMinResponseSize())); predicates.add(
new MaxSizePredicate((int) compression.getMinResponseSize().toBytes()));
predicates.add(new CompressibleMimeTypePredicate(compression.getMimeTypes())); predicates.add(new CompressibleMimeTypePredicate(compression.getMimeTypes()));
if (compression.getExcludedUserAgents() != null) { if (compression.getExcludedUserAgents() != null) {
for (String agent : compression.getExcludedUserAgents()) { for (String agent : compression.getExcludedUserAgents()) {

@ -16,6 +16,8 @@
package org.springframework.boot.web.server; package org.springframework.boot.web.server;
import org.springframework.util.unit.DataSize;
/** /**
* Simple server-independent abstraction for compression configuration. * Simple server-independent abstraction for compression configuration.
* *
@ -45,7 +47,7 @@ public class Compression {
/** /**
* Minimum "Content-Length" value that is required for compression to be performed. * Minimum "Content-Length" value that is required for compression to be performed.
*/ */
private int minResponseSize = 2048; private DataSize minResponseSize = DataSize.ofKilobytes(2);
public boolean getEnabled() { public boolean getEnabled() {
return this.enabled; return this.enabled;
@ -63,11 +65,11 @@ public class Compression {
this.mimeTypes = mimeTypes; this.mimeTypes = mimeTypes;
} }
public int getMinResponseSize() { public DataSize getMinResponseSize() {
return this.minResponseSize; return this.minResponseSize;
} }
public void setMinResponseSize(int minSize) { public void setMinResponseSize(DataSize minSize) {
this.minResponseSize = minSize; this.minResponseSize = minSize;
} }

@ -58,6 +58,7 @@ import org.springframework.http.server.reactive.HttpHandler;
import org.springframework.http.server.reactive.ServerHttpRequest; import org.springframework.http.server.reactive.ServerHttpRequest;
import org.springframework.http.server.reactive.ServerHttpResponse; import org.springframework.http.server.reactive.ServerHttpResponse;
import org.springframework.util.SocketUtils; import org.springframework.util.SocketUtils;
import org.springframework.util.unit.DataSize;
import org.springframework.web.reactive.function.BodyInserters; import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.WebClient; import org.springframework.web.reactive.function.client.WebClient;
@ -265,7 +266,7 @@ public abstract class AbstractReactiveWebServerFactoryTests {
public void noCompressionForSmallResponse() { public void noCompressionForSmallResponse() {
Compression compression = new Compression(); Compression compression = new Compression();
compression.setEnabled(true); compression.setEnabled(true);
compression.setMinResponseSize(3001); compression.setMinResponseSize(DataSize.ofBytes(3001));
WebClient client = prepareCompressionTest(compression); WebClient client = prepareCompressionTest(compression);
ResponseEntity<Void> response = client.get().exchange() ResponseEntity<Void> response = client.get().exchange()
.flatMap((res) -> res.toEntity(Void.class)).block(); .flatMap((res) -> res.toEntity(Void.class)).block();

Loading…
Cancel
Save