final class R2dbcReadJournal extends ReadJournal with CurrentEventsBySliceQuery with EventsBySliceQuery with CurrentEventsBySliceStartingFromSnapshotsQuery with EventsBySliceStartingFromSnapshotsQuery with EventTimestampQuery with LoadEventQuery with CurrentEventsByPersistenceIdQuery with CurrentEventsByPersistenceIdTypedQuery with EventsByPersistenceIdQuery with EventsByPersistenceIdTypedQuery with CurrentPersistenceIdsQuery with PagedPersistenceIdsQuery with EventsByPersistenceIdStartingFromSnapshotQuery with CurrentEventsByPersistenceIdStartingFromSnapshotQuery
- Alphabetic
- By Inheritance
- R2dbcReadJournal
- CurrentEventsByPersistenceIdStartingFromSnapshotQuery
- EventsByPersistenceIdStartingFromSnapshotQuery
- PagedPersistenceIdsQuery
- CurrentPersistenceIdsQuery
- EventsByPersistenceIdTypedQuery
- EventsByPersistenceIdQuery
- CurrentEventsByPersistenceIdTypedQuery
- CurrentEventsByPersistenceIdQuery
- LoadEventQuery
- EventTimestampQuery
- EventsBySliceStartingFromSnapshotsQuery
- CurrentEventsBySliceStartingFromSnapshotsQuery
- EventsBySliceQuery
- CurrentEventsBySliceQuery
- ReadJournal
- AnyRef
- Any
- Hide All
- Show All
- Public
- Protected
Instance Constructors
- new R2dbcReadJournal(delegate: scaladsl.R2dbcReadJournal)
Value Members
- final def !=(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def ##: Int
- Definition Classes
- AnyRef → Any
- final def ==(arg0: Any): Boolean
- Definition Classes
- AnyRef → Any
- final def asInstanceOf[T0]: T0
- Definition Classes
- Any
- def clone(): AnyRef
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.CloneNotSupportedException]) @IntrinsicCandidate() @native()
- def currentEventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed]
- Definition Classes
- R2dbcReadJournal → CurrentEventsByPersistenceIdQuery
- def currentEventsByPersistenceIdStartingFromSnapshot[Snapshot, Event](persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, transformSnapshot: Function[Snapshot, Event]): Source[EventEnvelope[Event], NotUsed]
- Definition Classes
- R2dbcReadJournal → CurrentEventsByPersistenceIdStartingFromSnapshotQuery
- def currentEventsByPersistenceIdTyped[Event](persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope[Event], NotUsed]
- Definition Classes
- R2dbcReadJournal → CurrentEventsByPersistenceIdTypedQuery
- def currentEventsBySlices[Event](entityType: String, minSlice: Int, maxSlice: Int, offset: Offset): Source[EventEnvelope[Event], NotUsed]
- Definition Classes
- R2dbcReadJournal → CurrentEventsBySliceQuery
- def currentEventsBySlicesStartingFromSnapshots[Snapshot, Event](entityType: String, minSlice: Int, maxSlice: Int, offset: Offset, transformSnapshot: Function[Snapshot, Event]): Source[EventEnvelope[Event], NotUsed]
Same as
currentEventsBySlices
but with the purpose to use snapshots as starting points and thereby reducing the number of events that have to be loaded. Same as
currentEventsBySlices
but with the purpose to use snapshots as starting points and thereby reducing the number of events that have to be loaded. This can be useful if the consumer starts from zero without any previously processed offset or if it has been disconnected for a long while and its offset is far behind. First it loads all snapshots with timestamps greater than or equal to the offset timestamp. There is at most one snapshot per persistenceId. The snapshots are transformed to events with the given
transformSnapshot
function. After emitting the snapshot events, the ordinary events with sequence numbers after the snapshots are emitted.
To use
currentEventsBySlicesStartingFromSnapshots
you must enable the configuration akka.persistence.r2dbc.query.start-from-snapshot.enabled
and follow the instructions in the migration guide https://doc.akka.io/libraries/akka-persistence-r2dbc/current/migration-guide.html#eventsBySlicesStartingFromSnapshots
- Definition Classes
- R2dbcReadJournal → CurrentEventsBySliceStartingFromSnapshotsQuery
- def currentPersistenceIds(entityType: String, afterId: Option[String], limit: Long): Source[String, NotUsed]
Get the current persistence ids.
Get the current persistence ids.
Note: to reuse existing index, the actual query filters entity types based on persistence_id column and sql LIKE operator. Hence the persistenceId must start with an entity type followed by default separator ("|") from akka.persistence.typed.PersistenceId.
- entityType
The entity type name.
- afterId
The ID to start returning results from, or empty to return all ids. This should be an id returned from a previous invocation of this command. Callers should not assume that ids are returned in sorted order.
- limit
The maximum results to return. Use Long.MAX_VALUE to return all results. Must be greater than zero.
- returns
A source containing all the persistence ids, limited as specified.
- def currentPersistenceIds(afterId: Optional[String], limit: Long): Source[String, NotUsed]
- Definition Classes
- R2dbcReadJournal → PagedPersistenceIdsQuery
- def currentPersistenceIds(): Source[String, NotUsed]
- Definition Classes
- R2dbcReadJournal → CurrentPersistenceIdsQuery
- final def eq(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- def equals(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef → Any
- def eventsByPersistenceId(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope, NotUsed]
- Definition Classes
- R2dbcReadJournal → EventsByPersistenceIdQuery
- def eventsByPersistenceIdStartingFromSnapshot[Snapshot, Event](persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, transformSnapshot: Function[Snapshot, Event]): Source[EventEnvelope[Event], NotUsed]
- Definition Classes
- R2dbcReadJournal → EventsByPersistenceIdStartingFromSnapshotQuery
- def eventsByPersistenceIdTyped[Event](persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long): Source[EventEnvelope[Event], NotUsed]
- Definition Classes
- R2dbcReadJournal → EventsByPersistenceIdTypedQuery
- def eventsBySlices[Event](entityType: String, minSlice: Int, maxSlice: Int, offset: Offset): Source[EventEnvelope[Event], NotUsed]
Query events for given slices.
Query events for given slices. A slice is deterministically defined based on the persistence id. The purpose is to evenly distribute all persistence ids over the slices.
The consumer can keep track of its current position in the event stream by storing the
offset
and restart the query from a given offset
after a crash/restart. The supported offset is akka.persistence.query.TimestampOffset and Offset.noOffset.
The timestamp is based on the database
CURRENT_TIMESTAMP
when the event was stored. CURRENT_TIMESTAMP
is the time when the transaction started, not when it was committed. This means that a "later" event may be visible first and when retrieving events after the previously seen timestamp we may miss some events. In distributed SQL databases there can also be clock skews for the database timestamps. For that reason it will perform additional backtracking queries to catch missed events. Events from backtracking will typically be duplicates of previously emitted events. It's the responsibility of the consumer to filter duplicates and make sure that events are processed in exact sequence number order for each persistence id. Such deduplication is provided by the R2DBC Projection. Events emitted by the backtracking don't contain the event payload (
EventBySliceEnvelope.event
is None) and the consumer can load the full EventBySliceEnvelope
with R2dbcReadJournal.loadEnvelope. The events will be emitted in the timestamp order with the caveat of duplicate events as described above. Events with the same timestamp are ordered by sequence number.
The stream is not completed when it reaches the end of the currently stored events, but it continues to push new events when new events are persisted. Corresponding query that is completed when it reaches the end of the currently stored events is provided by R2dbcReadJournal.currentEventsBySlices.
The slice range cannot span over more than one data partition, which in practice means that the number of Projection instances must be greater than or equal to the number of data partitions. For example, with 4 data partitions the slice range (0 - 255) is allowed but not (0 - 511). A smaller slice range such as (0 - 127) is also allowed.
- Definition Classes
- R2dbcReadJournal → EventsBySliceQuery
- def eventsBySlicesStartingFromSnapshots[Snapshot, Event](entityType: String, minSlice: Int, maxSlice: Int, offset: Offset, transformSnapshot: Function[Snapshot, Event]): Source[EventEnvelope[Event], NotUsed]
Same as
eventsBySlices
but with the purpose to use snapshots as starting points and thereby reducing the number of events that have to be loaded. Same as
eventsBySlices
but with the purpose to use snapshots as starting points and thereby reducing the number of events that have to be loaded. This can be useful if the consumer starts from zero without any previously processed offset or if it has been disconnected for a long while and its offset is far behind. First it loads all snapshots with timestamps greater than or equal to the offset timestamp. There is at most one snapshot per persistenceId. The snapshots are transformed to events with the given
transformSnapshot
function. After emitting the snapshot events, the ordinary events with sequence numbers after the snapshots are emitted.
To use
eventsBySlicesStartingFromSnapshots
you must enable the configuration akka.persistence.r2dbc.query.start-from-snapshot.enabled
and follow the instructions in the migration guide https://doc.akka.io/libraries/akka-persistence-r2dbc/current/migration-guide.html#eventsBySlicesStartingFromSnapshots
- Definition Classes
- R2dbcReadJournal → EventsBySliceStartingFromSnapshotsQuery
- final def getClass(): Class[_ <: AnyRef]
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- def hashCode(): Int
- Definition Classes
- AnyRef → Any
- Annotations
- @IntrinsicCandidate() @native()
- final def isInstanceOf[T0]: Boolean
- Definition Classes
- Any
- def loadEnvelope[Event](persistenceId: String, sequenceNr: Long): CompletionStage[EventEnvelope[Event]]
- Definition Classes
- R2dbcReadJournal → LoadEventQuery
- final def ne(arg0: AnyRef): Boolean
- Definition Classes
- AnyRef
- final def notify(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- final def notifyAll(): Unit
- Definition Classes
- AnyRef
- Annotations
- @IntrinsicCandidate() @native()
- def sliceForPersistenceId(persistenceId: String): Int
- Definition Classes
- R2dbcReadJournal → EventsBySliceStartingFromSnapshotsQuery → CurrentEventsBySliceStartingFromSnapshotsQuery → EventsBySliceQuery → CurrentEventsBySliceQuery
- def sliceRanges(numberOfRanges: Int): List[Pair[Integer, Integer]]
- Definition Classes
- R2dbcReadJournal → EventsBySliceStartingFromSnapshotsQuery → CurrentEventsBySliceStartingFromSnapshotsQuery → EventsBySliceQuery → CurrentEventsBySliceQuery
- final def synchronized[T0](arg0: => T0): T0
- Definition Classes
- AnyRef
- def timestampOf(persistenceId: String, sequenceNr: Long): CompletionStage[Optional[Instant]]
- Definition Classes
- R2dbcReadJournal → EventTimestampQuery
- def toString(): String
- Definition Classes
- AnyRef → Any
- final def wait(arg0: Long, arg1: Int): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
- final def wait(arg0: Long): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException]) @native()
- final def wait(): Unit
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.InterruptedException])
Deprecated Value Members
- def finalize(): Unit
- Attributes
- protected[lang]
- Definition Classes
- AnyRef
- Annotations
- @throws(classOf[java.lang.Throwable]) @Deprecated
- Deprecated
(Since version 9)