diff --git a/core/src/it/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala b/core/src/it/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala index a4fb2df71..4fa2f99a7 100644 --- a/core/src/it/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala +++ b/core/src/it/scala/akka/persistence/jdbc/integration/HardDeleteQueryTest.scala @@ -8,16 +8,10 @@ import akka.persistence.jdbc.query.{ SqlServerCleaner } -class PostgresHardDeleteQueryTest - extends HardDeleteQueryTest("postgres-application-with-hard-delete.conf") - with PostgresCleaner +class PostgresHardDeleteQueryTest extends HardDeleteQueryTest("postgres-application.conf") with PostgresCleaner -class MySQLHardDeleteQueryTest extends HardDeleteQueryTest("mysql-application-with-hard-delete.conf") with MysqlCleaner +class MySQLHardDeleteQueryTest extends HardDeleteQueryTest("mysql-application.conf") with MysqlCleaner -class OracleHardDeleteQueryTest - extends HardDeleteQueryTest("oracle-application-with-hard-delete.conf") - with OracleCleaner +class OracleHardDeleteQueryTest extends HardDeleteQueryTest("oracle-application.conf") with OracleCleaner -class SqlServerHardDeleteQueryTest - extends HardDeleteQueryTest("sqlserver-application-with-hard-delete.conf") - with SqlServerCleaner +class SqlServerHardDeleteQueryTest extends HardDeleteQueryTest("sqlserver-application.conf") with SqlServerCleaner diff --git a/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala b/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala index 55bcf6f38..e1e9d5faf 100644 --- a/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala +++ b/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalPerfSpec.scala @@ -17,10 +17,6 @@ class PostgresJournalPerfSpecSharedDb override def eventsCount: Int = 100 } -class PostgresJournalPerfSpecPhysicalDelete extends PostgresJournalPerfSpec { - this.cfg.withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)) -} - class MySQLJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("mysql-application.conf"), MySQL) { override def eventsCount: Int = 100 } @@ -30,10 +26,6 @@ class MySQLJournalPerfSpecSharedDb override def eventsCount: Int = 100 } -class MySQLJournalPerfSpecPhysicalDelete extends MySQLJournalPerfSpec { - this.cfg.withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)) -} - class OracleJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("oracle-application.conf"), Oracle) { override def eventsCount: Int = 100 } @@ -43,10 +35,6 @@ class OracleJournalPerfSpecSharedDb override def eventsCount: Int = 100 } -class OracleJournalPerfSpecPhysicalDelete extends OracleJournalPerfSpec { - this.cfg.withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)) -} - class SqlServerJournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("sqlserver-application.conf"), SqlServer) { override def eventsCount: Int = 100 @@ -56,7 +44,3 @@ class SqlServerJournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("sqlserver-shared-db-application.conf"), SqlServer) { override def eventsCount: Int = 100 } - -class SqlServerJournalPerfSpecPhysicalDelete extends SqlServerJournalPerfSpec { - this.cfg.withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)) -} diff --git a/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala 
b/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala index ac4337dc8..7a5fcfac7 100644 --- a/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala +++ b/core/src/it/scala/akka/persistence/jdbc/integration/JdbcJournalSpec.scala @@ -1,46 +1,19 @@ package akka.persistence.jdbc.integration import akka.persistence.jdbc.journal.JdbcJournalSpec -import akka.persistence.jdbc.testkit.internal.MySQL -import akka.persistence.jdbc.testkit.internal.Oracle -import akka.persistence.jdbc.testkit.internal.Postgres -import akka.persistence.jdbc.testkit.internal.SqlServer -import com.typesafe.config.{ ConfigFactory, ConfigValueFactory } +import akka.persistence.jdbc.testkit.internal.{ MySQL, Oracle, Postgres, SqlServer } +import com.typesafe.config.ConfigFactory class PostgresJournalSpec extends JdbcJournalSpec(ConfigFactory.load("postgres-application.conf"), Postgres) class PostgresJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("postgres-shared-db-application.conf"), Postgres) -class PostgresJournalSpecPhysicalDelete - extends JdbcJournalSpec( - ConfigFactory - .load("postgres-application.conf") - .withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)), - Postgres) class MySQLJournalSpec extends JdbcJournalSpec(ConfigFactory.load("mysql-application.conf"), MySQL) class MySQLJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("mysql-shared-db-application.conf"), MySQL) -class MySQLJournalSpecPhysicalDelete - extends JdbcJournalSpec( - ConfigFactory - .load("mysql-application.conf") - .withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)), - MySQL) class OracleJournalSpec extends JdbcJournalSpec(ConfigFactory.load("oracle-application.conf"), Oracle) class OracleJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("oracle-shared-db-application.conf"), Oracle) -class OracleJournalSpecPhysicalDelete - extends JdbcJournalSpec( - ConfigFactory - .load("oracle-application.conf") - .withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)), - Oracle) class SqlServerJournalSpec extends JdbcJournalSpec(ConfigFactory.load("sqlserver-application.conf"), SqlServer) class SqlServerJournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("sqlserver-shared-db-application.conf"), SqlServer) -class SqlServerJournalSpecPhysicalDelete - extends JdbcJournalSpec( - ConfigFactory - .load("sqlserver-application.conf") - .withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)), - SqlServer) diff --git a/core/src/it/scala/akka/persistence/jdbc/integration/LogicalDeleteQueryTest.scala b/core/src/it/scala/akka/persistence/jdbc/integration/LogicalDeleteQueryTest.scala deleted file mode 100644 index 6163c933b..000000000 --- a/core/src/it/scala/akka/persistence/jdbc/integration/LogicalDeleteQueryTest.scala +++ /dev/null @@ -1,17 +0,0 @@ -package akka.persistence.jdbc.integration - -import akka.persistence.jdbc.query.{ - LogicalDeleteQueryTest, - MysqlCleaner, - OracleCleaner, - PostgresCleaner, - SqlServerCleaner -} - -class PostgresLogicalDeleteQueryTest extends LogicalDeleteQueryTest("postgres-application.conf") with PostgresCleaner - -class MySQLLogicalDeleteQueryTest extends LogicalDeleteQueryTest("mysql-application.conf") with MysqlCleaner - -class OracleLogicalDeleteQueryTest extends LogicalDeleteQueryTest("oracle-application.conf") with OracleCleaner - -class SqlServerLogicalDeleteQueryTest extends LogicalDeleteQueryTest("sqlserver-application.conf") with 
SqlServerCleaner diff --git a/core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes b/core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes new file mode 100644 index 000000000..f3fb3c0e2 --- /dev/null +++ b/core/src/main/mima-filters/5.1.0.backwards.excludes/issue-557-logical-delete.excludes @@ -0,0 +1,4 @@ +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.BaseDaoConfig.logicalDelete") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.config.ReadJournalConfig.includeDeleted") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.BaseByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation") +ProblemFilters.exclude[DirectMissingMethodProblem]("akka.persistence.jdbc.journal.dao.legacy.ByteArrayJournalDao.logWarnAboutLogicalDeletionDeprecation") diff --git a/core/src/main/resources/reference.conf b/core/src/main/resources/reference.conf index c8ce90aae..c44858f46 100644 --- a/core/src/main/resources/reference.conf +++ b/core/src/main/resources/reference.conf @@ -14,18 +14,6 @@ akka-persistence-jdbc { - # If set to true event deletion is performed as a soft, logical delete. Events are kept in the journal and are sill - # delivered in queries. Otherwise, a hard delete will be performed. - # - # Note that even when configured for hard deletes, the last event is kept as a logical delete. - # This is necessary because we must keep track of the highest sequence number that was ever used on a - # given persistent actor. However, this last event will be 'invisible' it won't be ever replay nor delivered on queries. - # - # This property affects jdbc-journal.logicalDelete and jdbc-read-journal.includeLogicallyDeleted. - # - logicalDeletion.enable = true - - # The tag separator to use when tagging events with more than one tag. # This property affects jdbc-journal.tagSeparator and jdbc-read-journal.tagSeparator. tagSeparator = "," @@ -191,11 +179,6 @@ jdbc-journal { # The maximum number of batch-inserts that may be running concurrently parallelism = 8 - # Only mark as deleted. If false, delete physically - # should not be configured directly, but through property akka-persistence-jdbc.logicalDelete.enable - # in order to keep consistent behavior over write/read sides - logicalDelete = ${akka-persistence-jdbc.logicalDeletion.enable} - # This setting can be used to configure usage of a shared database. # To disable usage of a shared database, set to null or an empty string. # When set to a non empty string, this setting does two things: @@ -438,11 +421,6 @@ jdbc-read-journal { dao = "akka.persistence.jdbc.query.dao.DefaultReadJournalDao" - # if true, queries will include logically deleted events - # should not be configured directly, but through property akka-persistence-jdbc.logicalDelete.enable - # in order to keep consistent behavior over write/read sides - includeLogicallyDeleted = ${akka-persistence-jdbc.logicalDeletion.enable} - # Settings for determining if ids (ordering column) in the journal are out of sequence. 
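
With `akka-persistence-jdbc.logicalDeletion.enable` and its two derived keys gone from `reference.conf`, any application that still sets them will silently get no effect. A minimal sketch of a startup guard an application could add, using only the Typesafe Config API; the key names come from the removed `reference.conf` entries, while the guard object and its fail-fast policy are illustrative, not part of this change:

```scala
import com.typesafe.config.{ Config, ConfigFactory }

object RemovedSettingsGuard {
  // Keys deleted by this change; setting them no longer has any effect.
  private val removedKeys = Seq(
    "akka-persistence-jdbc.logicalDeletion.enable",
    "jdbc-journal.logicalDelete",
    "jdbc-read-journal.includeLogicallyDeleted")

  // Fail fast if an application config still sets one of the removed keys.
  def check(config: Config = ConfigFactory.load()): Unit =
    removedKeys.filter(config.hasPath).foreach { key =>
      sys.error(s"'$key' was removed along with logical deletion (issue #557); drop it from your configuration.")
    }
}
```
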
journal-sequence-retrieval { # The maximum number of ids that will be retrieved in each batch diff --git a/core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala b/core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala index 38f7d0b6b..4613ae33c 100644 --- a/core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala +++ b/core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala @@ -129,8 +129,7 @@ class BaseDaoConfig(config: Config) { val batchSize: Int = config.getInt("batchSize") val replayBatchSize: Int = config.getInt("replayBatchSize") val parallelism: Int = config.getInt("parallelism") - val logicalDelete: Boolean = config.getBoolean("logicalDelete") - override def toString: String = s"BaseDaoConfig($bufferSize,$batchSize,$parallelism,$logicalDelete)" + override def toString: String = s"BaseDaoConfig($bufferSize,$batchSize,$parallelism)" } class ReadJournalPluginConfig(config: Config) { @@ -189,10 +188,9 @@ class ReadJournalConfig(config: Config) { val refreshInterval: FiniteDuration = config.asFiniteDuration("refresh-interval") val maxBufferSize: Int = config.getInt("max-buffer-size") val addShutdownHook: Boolean = config.getBoolean("add-shutdown-hook") - val includeDeleted: Boolean = config.getBoolean("includeLogicallyDeleted") override def toString: String = - s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted)" + s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook)" } class DurableStateTableColumnNames(config: Config) { diff --git a/core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala b/core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala index 79d5d4f3c..3143bff63 100644 --- a/core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala +++ b/core/src/main/scala/akka/persistence/jdbc/journal/dao/legacy/ByteArrayJournalDao.scala @@ -5,8 +5,8 @@ package akka.persistence.jdbc.journal.dao.legacy -import akka.persistence.jdbc.journal.dao.{ BaseDao, BaseJournalDaoWithReadMessages, H2Compat, JournalDaoWithUpdates } import akka.persistence.jdbc.config.{ BaseDaoConfig, JournalConfig } +import akka.persistence.jdbc.journal.dao.{ BaseDao, BaseJournalDaoWithReadMessages, H2Compat, JournalDaoWithUpdates } import akka.persistence.jdbc.serialization.FlowPersistentReprSerializer import akka.persistence.{ AtomicWrite, PersistentRepr } import akka.serialization.Serialization @@ -48,21 +48,10 @@ trait BaseByteArrayJournalDao implicit val ec: ExecutionContext implicit val mat: Materializer - import journalConfig.daoConfig.logicalDelete import profile.api._ val logger = LoggerFactory.getLogger(this.getClass) - // This logging may block since we don't control how the user will configure logback - // We can't use a Akka logging neither because we don't have an ActorSystem in scope and - // we should not introduce another dependency here. 
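
The deprecation warning being deleted just below relied on a small Scala idiom worth noting: a `lazy val` of type `Unit` whose initializer runs at most once, giving a thread-safe log-once without an `AtomicBoolean`. A distilled sketch of that pattern, with illustrative names (`LogOnce`, `deleteLogically` are not identifiers from this codebase):

```scala
import org.slf4j.LoggerFactory

trait LogOnce {
  private val logger = LoggerFactory.getLogger(this.getClass)

  // A lazy val initializer runs at most once and is thread-safe, so every
  // caller can reference it cheaply; only the first reference logs.
  private lazy val warnOnce: Unit =
    logger.warn("Logical deletion of events is deprecated and will be removed.")

  def deleteLogically(): Unit = {
    warnOnce // logs on the first call only
    // ... perform the logical delete ...
  }
}
```
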
- // Therefore, we make sure we only log a warning for logical deletes once - lazy val logWarnAboutLogicalDeletionDeprecation = { - logger.warn( - "Logical deletion of events is deprecated and will be removed in akka-persistence-jdbc in a later version " + - "To disable it in this current version you must set the property 'akka-persistence-jdbc.logicalDeletion.enable' to false.") - } - def writeJournalRows(xs: Seq[JournalRow]): Future[Unit] = { // Write atomically without auto-commit db.run(queries.writeJournalRows(xs).transactionally).map(_ => ()) } @@ -85,26 +74,17 @@ trait BaseByteArrayJournalDao queueWriteJournalRows(rowsToWrite).map(_ => resultWhenWriteComplete) } - override def delete(persistenceId: String, maxSequenceNr: Long): Future[Unit] = - if (logicalDelete) { - // We only log a warning when user effectively deletes an event. - // The rationale here is that this feature is not so broadly used and the default - // is to have logical delete enabled. - // We don't want to log warnings for users that are not using this, - // so we make it happen only when effectively used. - logWarnAboutLogicalDeletionDeprecation - db.run(queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr)).map(_ => ()) - } else { - // We should keep journal record with highest sequence number in order to be compliant - // with @see [[akka.persistence.journal.JournalSpec]] - val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = for { - _ <- queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr) - highestMarkedSequenceNr <- highestMarkedSequenceNr(persistenceId) - _ <- queries.delete(persistenceId, highestMarkedSequenceNr.getOrElse(0L) - 1) - } yield () - - db.run(actions.transactionally) - } + override def delete(persistenceId: String, maxSequenceNr: Long): Future[Unit] = { + // We should keep journal record with highest sequence number in order to be compliant + // with @see [[akka.persistence.journal.JournalSpec]] + val actions: DBIOAction[Unit, NoStream, Effect.Write with Effect.Read] = for { + _ <- queries.markJournalMessagesAsDeleted(persistenceId, maxSequenceNr) + highestMarkedSequenceNr <- highestMarkedSequenceNr(persistenceId) + _ <- queries.delete(persistenceId, highestMarkedSequenceNr.getOrElse(0L) - 1) + } yield () + + db.run(actions.transactionally) + } def update(persistenceId: String, sequenceNr: Long, payload: AnyRef): Future[Done] = { val write = PersistentRepr(payload, sequenceNr, persistenceId) @@ -140,5 +120,4 @@ trait BaseByteArrayJournalDao case Success((repr, _, ordering)) => Success(repr -> ordering) case Failure(e) => Failure(e) } - } diff --git a/core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala index 454bbbed7..3684eb9af 100644 --- a/core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/jdbc/query/dao/ReadJournalQueries.scala @@ -25,8 +25,7 @@ class ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJo baseTableQuery().map(_.persistenceId).distinct.take(max) private def baseTableQuery() = - if (readJournalConfig.includeDeleted) JournalTable - else JournalTable.filter(_.deleted === false) + JournalTable.filter(_.deleted === false) private def baseTableWithTagsQuery() = { baseTableQuery().join(TagTable).on(_.ordering === _.eventId) @@ -43,6 +42,7 @@ class ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJo .filter(_.persistenceId === 
persistenceId) .filter(_.sequenceNumber >= fromSequenceNr) .filter(_.sequenceNumber <= toSequenceNr) + .filter(!_.deleted) .sortBy(_.sequenceNumber.asc) .take(max) diff --git a/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala b/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala index 05c40ec03..4c15231a6 100644 --- a/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala +++ b/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ByteArrayReadJournalDao.scala @@ -124,19 +124,7 @@ trait OracleReadJournalDao extends ReadJournalDao { val theTag = s"%$tag%" val selectStatement = - if (readJournalConfig.includeDeleted) - sql""" - SELECT "#$ordering", "#$deleted", "#$persistenceId", "#$sequenceNumber", "#$message", "#$tags" - FROM ( - SELECT * FROM #$theTableName - WHERE "#$tags" LIKE $theTag - AND "#$ordering" > $theOffset - AND "#$ordering" <= $maxOffset - ORDER BY "#$ordering" - ) - WHERE rownum <= $max""".as[JournalRow] - else - sql""" + sql""" SELECT "#$ordering", "#$deleted", "#$persistenceId", "#$sequenceNumber", "#$message", "#$tags" FROM ( SELECT * FROM #$theTableName diff --git a/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala b/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala index 1471bd75d..0bc87fa65 100644 --- a/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala +++ b/core/src/main/scala/akka/persistence/jdbc/query/dao/legacy/ReadJournalQueries.scala @@ -24,8 +24,7 @@ class ReadJournalQueries(val profile: JdbcProfile, val readJournalConfig: ReadJo baseTableQuery().map(_.persistenceId).distinct.take(max) private def baseTableQuery() = - if (readJournalConfig.includeDeleted) JournalTable - else JournalTable.filter(_.deleted === false) + JournalTable.filter(_.deleted === false) val allPersistenceIdsDistinct = Compiled(_allPersistenceIdsDistinct _) diff --git a/core/src/test/resources/general.conf b/core/src/test/resources/general.conf index 4169405e0..ba9873cf8 100644 --- a/core/src/test/resources/general.conf +++ b/core/src/test/resources/general.conf @@ -43,7 +43,6 @@ docker { } jdbc-journal { - logicalDelete = ${akka-persistence-jdbc.logicalDeletion.enable} event-adapters { test-write-event-adapter = "akka.persistence.jdbc.query.EventAdapterTest$TestWriteEventAdapter" test-read-event-adapter = "akka.persistence.jdbc.query.EventAdapterTest$TestReadEventAdapter" @@ -59,7 +58,6 @@ jdbc-journal { jdbc-read-journal { - includeLogicallyDeleted = ${akka-persistence-jdbc.logicalDeletion.enable} refresh-interval = "10ms" max-buffer-size = "500" } diff --git a/core/src/test/resources/h2-application-with-hard-delete.conf b/core/src/test/resources/h2-application-with-hard-delete.conf deleted file mode 100644 index b793fdece..000000000 --- a/core/src/test/resources/h2-application-with-hard-delete.conf +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 Dennis Vriend -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
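
Stepping back from the resource deletions for a moment: the read-side change above makes the `deleted === false` filter unconditional instead of gated on `includeDeleted`, and adds the same guard to the by-persistence-id query. A self-contained Slick sketch of that shape, using an illustrative table definition and the H2 profile for brevity (column and class names here are stand-ins, not the actual `JournalTable`):

```scala
import slick.jdbc.H2Profile.api._

class Journal(tag: Tag) extends Table[(String, Long, Boolean)](tag, "journal") {
  def persistenceId = column[String]("persistence_id")
  def sequenceNumber = column[Long]("sequence_number")
  def deleted = column[Boolean]("deleted")
  def * = (persistenceId, sequenceNumber, deleted)
}

object Queries {
  val journal = TableQuery[Journal]

  // Logically deleted rows are never visible to queries anymore:
  val baseTableQuery = journal.filter(_.deleted === false)

  def messages(pid: String, from: Long, to: Long) =
    baseTableQuery
      .filter(_.persistenceId === pid)
      .filter(r => r.sequenceNumber >= from && r.sequenceNumber <= to)
      .sortBy(_.sequenceNumber.asc)
}
```
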
-# See the License for the specific language governing permissions and -# limitations under the License. - -// general.conf is included only for shared settings used for the akka-persistence-jdbc tests -include "general.conf" -include "h2-application.conf" - -akka-persistence-jdbc.logicalDeletion.enable = false diff --git a/core/src/test/resources/mysql-application-with-hard-delete.conf b/core/src/test/resources/mysql-application-with-hard-delete.conf deleted file mode 100644 index 93dc262b8..000000000 --- a/core/src/test/resources/mysql-application-with-hard-delete.conf +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 Dennis Vriend -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -// general.conf is included only for shared settings used for the akka-persistence-jdbc tests -include "general.conf" -include "mysql-application.conf" - -akka-persistence-jdbc.logicalDeletion.enable = false diff --git a/core/src/test/resources/oracle-application-with-hard-delete.conf b/core/src/test/resources/oracle-application-with-hard-delete.conf deleted file mode 100644 index e8b088790..000000000 --- a/core/src/test/resources/oracle-application-with-hard-delete.conf +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 Dennis Vriend -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -// general.conf is included only for shared settings used for the akka-persistence-jdbc tests -include "general.conf" -include "oracle-application.conf" - -akka-persistence-jdbc.logicalDeletion.enable = false diff --git a/core/src/test/resources/postgres-application-with-hard-delete.conf b/core/src/test/resources/postgres-application-with-hard-delete.conf deleted file mode 100644 index 578420e11..000000000 --- a/core/src/test/resources/postgres-application-with-hard-delete.conf +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 Dennis Vriend -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
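
Each of the `*-application-with-hard-delete.conf` resources being deleted here did the same three things: include `general.conf`, include the base per-database config, and flip the now-removed flag. Since hard delete is the only remaining behavior, the files are redundant and the tests load the base configs directly. For reference, a sketch of the same include-plus-override expressed programmatically (the object name is illustrative; the key it flips is the one this PR removes):

```scala
import com.typesafe.config.{ Config, ConfigFactory, ConfigValueFactory }

object HardDeleteConfigs {
  // Equivalent of one deleted resource file: load the base test config and
  // flip the (now removed) flag. Shown only to illustrate the pattern.
  def forBase(baseResource: String): Config =
    ConfigFactory
      .load(baseResource)
      .withValue("akka-persistence-jdbc.logicalDeletion.enable", ConfigValueFactory.fromAnyRef(false))
}
```
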
- -// general.conf is included only for shared settings used for the akka-persistence-jdbc tests -include "general.conf" -include "postgres-application.conf" - -akka-persistence-jdbc.logicalDeletion.enable = false diff --git a/core/src/test/resources/sqlserver-application-with-hard-delete.conf b/core/src/test/resources/sqlserver-application-with-hard-delete.conf deleted file mode 100644 index 4f0dda093..000000000 --- a/core/src/test/resources/sqlserver-application-with-hard-delete.conf +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2016 Dennis Vriend -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -// general.conf is included only for shared settings used for the akka-persistence-jdbc tests -include "general.conf" -include "sqlserver-application.conf" - -akka-persistence-jdbc.logicalDeletion.enable = false diff --git a/core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala b/core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala index 260ea14fa..6dce8baa7 100644 --- a/core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala +++ b/core/src/test/scala/akka/persistence/jdbc/configuration/AkkaPersistenceConfigTest.scala @@ -7,211 +7,209 @@ package akka.persistence.jdbc.configuration import akka.persistence.jdbc.config._ import com.typesafe.config.{ Config, ConfigFactory } - -import scala.concurrent.duration._ import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers +import scala.concurrent.duration._ + class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers { private val referenceConfig: Config = ConfigFactory.load("reference") val config: Config = ConfigFactory .parseString(""" - |akka-persistence-jdbc.slick.db { - | host = - | port = - | name = - |} - | - |jdbc-journal { - | class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal" - | - | tables { - | journal { - | tableName = "journal" - | schemaName = "" - | columnNames { - | ordering = "ordering" - | persistenceId = "persistence_id" - | sequenceNumber = "sequence_number" - | deleted = "deleted" - | tags = "tags" - | message = "message" - | } - | } - | } - | - | tagSeparator = "," - | - | dao = "akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao" - | - | logicalDelete = true - | - | slick { - | profile = "slick.jdbc.PostgresProfile$" - | db { - | host = "localhost" - | host = ${?POSTGRES_HOST} - | port = "5432" - | port = ${?POSTGRES_PORT} - | name = "docker" - | - | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} - | user = "docker" - | password = "docker" - | driver = "org.postgresql.Driver$" - | - | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP - | - | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing - | // slick will use an async executor with a fixed size queue of 10.000 objects - | // The async executor is a connection pool for asynchronous 
execution of blocking I/O actions. - | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. - | queueSize = 10000 // number of objects that can be queued by the async exector - | - | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) - | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 - | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) - | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) - | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 - | - | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. 
Default: true - | - | keepAliveConnection = on // ensures that the database does not get dropped while we are using it - | - | numThreads = 4 // number of cores - | maxConnections = 4 // same as numThreads - | minConnections = 4 // same as numThreads - | } - | } - |} - | - |# the akka-persistence-snapshot-store in use - |jdbc-snapshot-store { - | class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore" - | - | tables { - | snapshot { - | tableName = "snapshot" - | schemaName = "" - | columnNames { - | persistenceId = "persistence_id" - | sequenceNumber = "sequence_number" - | created = "created" - | snapshot = "snapshot" - | } - | } - | } - | - | dao = "akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao" - | - | slick { - | profile = "slick.jdbc.MySQLProfile$" - | db { - | host = "localhost" - | host = ${?POSTGRES_HOST} - | port = "5432" - | port = ${?POSTGRES_PORT} - | name = "docker" - | - | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} - | user = "docker" - | password = "docker" - | driver = "org.postgresql.Driver" - | - | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP - | - | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing - | // slick will use an async executor with a fixed size queue of 10.000 objects - | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. - | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. - | queueSize = 10000 // number of objects that can be queued by the async exector - | - | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) - | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 - | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) - | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) - | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. 
Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 - | - | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true - | - | keepAliveConnection = on // ensures that the database does not get dropped while we are using it - | - | numThreads = 4 // number of cores - | maxConnections = 4 // same as numThreads - | minConnections = 4 // same as numThreads - | } - | } - |} - | - |# the akka-persistence-query provider in use - |jdbc-read-journal { - | class = "akka.persistence.jdbc.query.JdbcReadJournalProvider" - | - | # New events are retrieved (polled) with this interval. - | refresh-interval = "300ms" - | - | # How many events to fetch in one query (replay) and keep buffered until they - | # are delivered downstreams. - | max-buffer-size = "10" - | - | dao = "akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao" - | - | tables { - | journal { - | tableName = "journal" - | schemaName = "" - | columnNames { - | ordering = "ordering" - | persistenceId = "persistence_id" - | sequenceNumber = "sequence_number" - | created = "created" - | tags = "tags" - | message = "message" - | } - | } - | } - | - | tagSeparator = "," - | - | slick { - | profile = "slick.jdbc.OracleProfile$" - | db { - | host = "localhost" - | host = ${?POSTGRES_HOST} - | port = "5432" - | port = ${?POSTGRES_PORT} - | name = "docker" - | - | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} - | user = "docker" - | password = "docker" - | driver = "org.postgresql.Driver" - | - | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP - | - | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing - | // slick will use an async executor with a fixed size queue of 10.000 objects - | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. - | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. - | queueSize = 10000 // number of objects that can be queued by the async exector - | - | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) - | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 - | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) - | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. 
When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) - | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 - | - | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true - | - | keepAliveConnection = on // ensures that the database does not get dropped while we are using it - | - | numThreads = 4 // number of cores - | maxConnections = 4 // same as numThreads - | minConnections = 4 // same as numThreads - | } - | } - |} + |akka-persistence-jdbc.slick.db { + | host = + | port = + | name = + |} + | + |jdbc-journal { + | class = "akka.persistence.jdbc.journal.JdbcAsyncWriteJournal" + | + | tables { + | journal { + | tableName = "journal" + | schemaName = "" + | columnNames { + | ordering = "ordering" + | persistenceId = "persistence_id" + | sequenceNumber = "sequence_number" + | deleted = "deleted" + | tags = "tags" + | message = "message" + | } + | } + | } + | + | tagSeparator = "," + | + | dao = "akka.persistence.jdbc.dao.bytea.journal.ByteArrayJournalDao" + | + | slick { + | profile = "slick.jdbc.PostgresProfile$" + | db { + | host = "localhost" + | host = ${?POSTGRES_HOST} + | port = "5432" + | port = ${?POSTGRES_PORT} + | name = "docker" + | + | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} + | user = "docker" + | password = "docker" + | driver = "org.postgresql.Driver$" + | + | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP + | + | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing + | // slick will use an async executor with a fixed size queue of 10.000 objects + | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. + | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. + | queueSize = 10000 // number of objects that can be queued by the async exector + | + | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) + | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 + | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. 
Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) + | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) + | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 + | + | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true + | + | keepAliveConnection = on // ensures that the database does not get dropped while we are using it + | + | numThreads = 4 // number of cores + | maxConnections = 4 // same as numThreads + | minConnections = 4 // same as numThreads + | } + | } + |} + | + |# the akka-persistence-snapshot-store in use + |jdbc-snapshot-store { + | class = "akka.persistence.jdbc.snapshot.JdbcSnapshotStore" + | + | tables { + | snapshot { + | tableName = "snapshot" + | schemaName = "" + | columnNames { + | persistenceId = "persistence_id" + | sequenceNumber = "sequence_number" + | created = "created" + | snapshot = "snapshot" + | } + | } + | } + | + | dao = "akka.persistence.jdbc.dao.bytea.snapshot.ByteArraySnapshotDao" + | + | slick { + | profile = "slick.jdbc.MySQLProfile$" + | db { + | host = "localhost" + | host = ${?POSTGRES_HOST} + | port = "5432" + | port = ${?POSTGRES_PORT} + | name = "docker" + | + | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} + | user = "docker" + | password = "docker" + | driver = "org.postgresql.Driver" + | + | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP + | + | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing + | // slick will use an async executor with a fixed size queue of 10.000 objects + | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. + | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. + | queueSize = 10000 // number of objects that can be queued by the async exector + | + | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. 
Default: 30000 (30 seconds) + | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 + | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) + | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) + | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 + | + | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. If you want your application to start even when the database is down/unavailable, set this property to false. Default: true + | + | keepAliveConnection = on // ensures that the database does not get dropped while we are using it + | + | numThreads = 4 // number of cores + | maxConnections = 4 // same as numThreads + | minConnections = 4 // same as numThreads + | } + | } + |} + | + |# the akka-persistence-query provider in use + |jdbc-read-journal { + | class = "akka.persistence.jdbc.query.JdbcReadJournalProvider" + | + | # New events are retrieved (polled) with this interval. + | refresh-interval = "300ms" + | + | # How many events to fetch in one query (replay) and keep buffered until they + | # are delivered downstreams. 
+ | max-buffer-size = "10" + | + | dao = "akka.persistence.jdbc.dao.bytea.readjournal.ByteArrayReadJournalDao" + | + | tables { + | journal { + | tableName = "journal" + | schemaName = "" + | columnNames { + | ordering = "ordering" + | persistenceId = "persistence_id" + | sequenceNumber = "sequence_number" + | created = "created" + | tags = "tags" + | message = "message" + | } + | } + | } + | + | tagSeparator = "," + | + | slick { + | profile = "slick.jdbc.OracleProfile$" + | db { + | host = "localhost" + | host = ${?POSTGRES_HOST} + | port = "5432" + | port = ${?POSTGRES_PORT} + | name = "docker" + | + | url = "jdbc:postgresql://"${akka-persistence-jdbc.slick.db.host}":"${akka-persistence-jdbc.slick.db.port}"/"${akka-persistence-jdbc.slick.db.name} + | user = "docker" + | password = "docker" + | driver = "org.postgresql.Driver" + | + | // hikariCP settings; see: https://github.com/brettwooldridge/HikariCP + | + | // read: https://github.com/brettwooldridge/HikariCP/wiki/About-Pool-Sizing + | // slick will use an async executor with a fixed size queue of 10.000 objects + | // The async executor is a connection pool for asynchronous execution of blocking I/O actions. + | // This is used for the asynchronous query execution API on top of blocking back-ends like JDBC. + | queueSize = 10000 // number of objects that can be queued by the async exector + | + | connectionTimeout = 30000 // This property controls the maximum number of milliseconds that a client (that's you) will wait for a connection from the pool. If this time is exceeded without a connection becoming available, a SQLException will be thrown. 1000ms is the minimum value. Default: 30000 (30 seconds) + | validationTimeout = 5000 // This property controls the maximum amount of time that a connection will be tested for aliveness. This value must be less than the connectionTimeout. The lowest accepted validation timeout is 1000ms (1 second). Default: 5000 + | idleTimeout = 600000 // 10 minutes: This property controls the maximum amount of time that a connection is allowed to sit idle in the pool. Whether a connection is retired as idle or not is subject to a maximum variation of +30 seconds, and average variation of +15 seconds. A connection will never be retired as idle before this timeout. A value of 0 means that idle connections are never removed from the pool. Default: 600000 (10 minutes) + | maxLifetime = 1800000 // 30 minutes: This property controls the maximum lifetime of a connection in the pool. When a connection reaches this timeout it will be retired from the pool, subject to a maximum variation of +30 seconds. An in-use connection will never be retired, only when it is closed will it then be removed. We strongly recommend setting this value, and it should be at least 30 seconds less than any database-level connection timeout. A value of 0 indicates no maximum lifetime (infinite lifetime), subject of course to the idleTimeout setting. Default: 1800000 (30 minutes) + | leakDetectionThreshold = 0 // This property controls the amount of time that a connection can be out of the pool before a message is logged indicating a possible connection leak. A value of 0 means leak detection is disabled. Lowest acceptable value for enabling leak detection is 2000 (2 secs). Default: 0 + | + | initializationFailFast = true // This property controls whether the pool will "fail fast" if the pool cannot be seeded with initial connections successfully. 
If you want your application to start even when the database is down/unavailable, set this property to false. Default: true + | + | keepAliveConnection = on // ensures that the database does not get dropped while we are using it + | + | numThreads = 4 // number of cores + | maxConnections = 4 // same as numThreads + | minConnections = 4 // same as numThreads + | } + | } + |} """.stripMargin) .withFallback(referenceConfig) .resolve() @@ -234,8 +232,6 @@ class AkkaPersistenceConfigTest extends AnyFlatSpec with Matchers { cfg.journalTableConfiguration.columnNames.persistenceId shouldBe "persistence_id" cfg.journalTableConfiguration.columnNames.sequenceNumber shouldBe "sequence_number" cfg.journalTableConfiguration.columnNames.tags shouldBe "tags" - - cfg.daoConfig.logicalDelete shouldBe true } it should "parse SnapshotConfig" in { diff --git a/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala b/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala index 9505c86f9..ed5fc61bd 100644 --- a/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala +++ b/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalPerfSpec.scala @@ -5,28 +5,21 @@ package akka.persistence.jdbc.journal -import scala.concurrent.duration._ - import akka.actor.Props import akka.persistence.CapabilityFlag import akka.persistence.jdbc.config._ import akka.persistence.jdbc.db.SlickExtension -import akka.persistence.jdbc.testkit.internal.H2 -import akka.persistence.jdbc.testkit.internal.SchemaType -import akka.persistence.jdbc.util.ClasspathResources -import akka.persistence.jdbc.util.DropCreate +import akka.persistence.jdbc.testkit.internal.{ H2, SchemaType } +import akka.persistence.jdbc.util.{ ClasspathResources, DropCreate } import akka.persistence.journal.JournalPerfSpec -import akka.persistence.journal.JournalPerfSpec.BenchActor -import akka.persistence.journal.JournalPerfSpec.Cmd -import akka.persistence.journal.JournalPerfSpec.ResetCounter +import akka.persistence.journal.JournalPerfSpec.{ BenchActor, Cmd, ResetCounter } import akka.testkit.TestProbe -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import com.typesafe.config.ConfigValueFactory -import org.scalatest.BeforeAndAfterAll -import org.scalatest.BeforeAndAfterEach +import com.typesafe.config.{ Config, ConfigFactory } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures +import scala.concurrent.duration._ + abstract class JdbcJournalPerfSpec(config: Config, schemaType: SchemaType) extends JournalPerfSpec(config) with BeforeAndAfterAll @@ -114,7 +107,3 @@ abstract class JdbcJournalPerfSpec(config: Config, schemaType: SchemaType) class H2JournalPerfSpec extends JdbcJournalPerfSpec(ConfigFactory.load("h2-application.conf"), H2) class H2JournalPerfSpecSharedDb extends JdbcJournalPerfSpec(ConfigFactory.load("h2-shared-db-application.conf"), H2) - -class H2JournalPerfSpecPhysicalDelete extends H2JournalPerfSpec { - this.cfg.withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)) -} diff --git a/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala b/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala index cf9e0f923..ecfe352b9 100644 --- a/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala +++ b/core/src/test/scala/akka/persistence/jdbc/journal/JdbcJournalSpec.scala @@ -5,22 +5,17 @@ package akka.persistence.jdbc.journal -import 
scala.concurrent.duration._ - import akka.persistence.CapabilityFlag import akka.persistence.jdbc.config._ import akka.persistence.jdbc.db.SlickExtension -import akka.persistence.jdbc.testkit.internal.H2 -import akka.persistence.jdbc.testkit.internal.SchemaType -import akka.persistence.jdbc.util.ClasspathResources -import akka.persistence.jdbc.util.DropCreate +import akka.persistence.jdbc.testkit.internal.{ H2, SchemaType } +import akka.persistence.jdbc.util.{ ClasspathResources, DropCreate } import akka.persistence.journal.JournalSpec -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import com.typesafe.config.ConfigValueFactory +import com.typesafe.config.{ Config, ConfigFactory } +import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import org.scalatest.concurrent.ScalaFutures -import org.scalatest.BeforeAndAfterAll -import org.scalatest.BeforeAndAfterEach + +import scala.concurrent.duration._ abstract class JdbcJournalSpec(config: Config, schemaType: SchemaType) extends JournalSpec(config) @@ -57,9 +52,3 @@ abstract class JdbcJournalSpec(config: Config, schemaType: SchemaType) class H2JournalSpec extends JdbcJournalSpec(ConfigFactory.load("h2-application.conf"), H2) class H2JournalSpecSharedDb extends JdbcJournalSpec(ConfigFactory.load("h2-shared-db-application.conf"), H2) -class H2JournalSpecPhysicalDelete - extends JdbcJournalSpec( - ConfigFactory - .load("h2-application.conf") - .withValue("jdbc-journal.logicalDelete", ConfigValueFactory.fromAnyRef(false)), - H2) diff --git a/core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala b/core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala index 893215cd9..c71a450ee 100644 --- a/core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala +++ b/core/src/test/scala/akka/persistence/jdbc/query/HardDeleteQueryTest.scala @@ -95,4 +95,4 @@ abstract class HardDeleteQueryTest(config: String) extends QueryTestSpec(config) } } -class H2HardDeleteQueryTest extends HardDeleteQueryTest("h2-application-with-hard-delete.conf") with H2Cleaner +class H2HardDeleteQueryTest extends HardDeleteQueryTest("h2-application.conf") with H2Cleaner diff --git a/core/src/test/scala/akka/persistence/jdbc/query/LogicalDeleteQueryTest.scala b/core/src/test/scala/akka/persistence/jdbc/query/LogicalDeleteQueryTest.scala deleted file mode 100644 index 893eae272..000000000 --- a/core/src/test/scala/akka/persistence/jdbc/query/LogicalDeleteQueryTest.scala +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright (C) 2014 - 2019 Dennis Vriend - * Copyright (C) 2019 - 2021 Lightbend Inc. - */ - -package akka.persistence.jdbc.query - -import akka.persistence.query.{ EventEnvelope, NoOffset, Sequence } -import akka.pattern._ -import scala.concurrent.duration._ - -abstract class LogicalDeleteQueryTest(config: String) extends QueryTestSpec(config) { - implicit val askTimeout = 500.millis - - it should "return logically deleted events when using CurrentEventsByTag (backward compatibility)" in withActorSystem { - implicit system => - val journalOps = new ScalaJdbcReadJournalOperations(system) - withTestActors(replyToMessages = true) { (actor1, _, _) => - (actor1 ? withTags(1, "number")).futureValue - (actor1 ? withTags(2, "number")).futureValue - (actor1 ? withTags(3, "number")).futureValue - - // delete and wait for confirmation - (actor1 ? 
DeleteCmd(1)).futureValue - - journalOps.withCurrentEventsByTag()("number", NoOffset) { tp => - tp.request(Int.MaxValue) - tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } - tp.expectComplete() - } - } - } - - it should "return logically deleted events when using EventsByTag (backward compatibility)" in withActorSystem { - implicit system => - val journalOps = new ScalaJdbcReadJournalOperations(system) - withTestActors(replyToMessages = true) { (actor1, _, _) => - (actor1 ? withTags(1, "number")).futureValue - (actor1 ? withTags(2, "number")).futureValue - (actor1 ? withTags(3, "number")).futureValue - - // delete and wait for confirmation - (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" - - journalOps.withEventsByTag()("number", NoOffset) { tp => - tp.request(Int.MaxValue) - tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } - tp.cancel() - } - } - } - - it should "return logically deleted events when using CurrentEventsByPersistenceId (backward compatibility)" in withActorSystem { - implicit system => - val journalOps = new ScalaJdbcReadJournalOperations(system) - withTestActors(replyToMessages = true) { (actor1, _, _) => - (actor1 ? withTags(1, "number")).futureValue - (actor1 ? withTags(2, "number")).futureValue - (actor1 ? withTags(3, "number")).futureValue - - // delete and wait for confirmation - (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" - - journalOps.withCurrentEventsByPersistenceId()("my-1") { tp => - tp.request(Int.MaxValue) - tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } - tp.expectComplete() - } - } - } - - it should "return logically deleted events when using EventsByPersistenceId (backward compatibility)" in withActorSystem { - implicit system => - val journalOps = new ScalaJdbcReadJournalOperations(system) - withTestActors(replyToMessages = true) { (actor1, _, _) => - (actor1 ? withTags(1, "number")).futureValue - (actor1 ? withTags(2, "number")).futureValue - (actor1 ? withTags(3, "number")).futureValue - - // delete and wait for confirmation - (actor1 ? DeleteCmd(1)).futureValue shouldBe "deleted-1" - - journalOps.withEventsByPersistenceId()("my-1") { tp => - tp.request(Int.MaxValue) - tp.expectNextPF { case EventEnvelope(Sequence(1), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(2), _, _, _) => } - tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => } - tp.cancel() - } - } - } -} - -class H2LogicalDeleteQueryTest extends LogicalDeleteQueryTest("h2-application.conf") with H2Cleaner
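
With `LogicalDeleteQueryTest` gone, the behavior the remaining `HardDeleteQueryTest` suites assert is the inverse of the deleted expectations above: after a delete, the deleted events are physically removed and no longer delivered, while the row with the highest deleted sequence number is kept internally (per the `delete` implementation earlier in this diff) but never replayed. A sketch of such an assertion, reusing the helpers visible in the deleted suite (`QueryTestSpec`, `withTestActors`, `withTags`, `DeleteCmd`, `ScalaJdbcReadJournalOperations`); the `"deleted-2"` reply string is assumed by analogy with `"deleted-1"` above, and the actual `HardDeleteQueryTest` assertions may differ:

```scala
package akka.persistence.jdbc.query

import akka.pattern._
import akka.persistence.query.{ EventEnvelope, Sequence }
import scala.concurrent.duration._

abstract class HardDeleteVisibilitySketch(config: String) extends QueryTestSpec(config) {
  implicit val askTimeout = 500.millis

  it should "no longer return deleted events from CurrentEventsByPersistenceId" in withActorSystem {
    implicit system =>
      val journalOps = new ScalaJdbcReadJournalOperations(system)
      withTestActors(replyToMessages = true) { (actor1, _, _) =>
        (actor1 ? withTags(1, "number")).futureValue
        (actor1 ? withTags(2, "number")).futureValue
        (actor1 ? withTags(3, "number")).futureValue

        // delete and wait for confirmation (reply format assumed by analogy)
        (actor1 ? DeleteCmd(2)).futureValue shouldBe "deleted-2"

        journalOps.withCurrentEventsByPersistenceId()("my-1") { tp =>
          tp.request(Int.MaxValue)
          // events 1 and 2 are physically gone; only event 3 survives. The
          // highest deleted sequence number is still tracked internally but
          // is never replayed or delivered.
          tp.expectNextPF { case EventEnvelope(Sequence(3), _, _, _) => }
          tp.expectComplete()
        }
      }
  }
}
```
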