diff --git a/.claude/settings.local.json b/.claude/settings.json similarity index 100% rename from .claude/settings.local.json rename to .claude/settings.json diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 3c5ed3b..ff78b55 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -4,10 +4,14 @@ version: 2 updates: - package-ecosystem: "github-actions" + cooldown: + default-days: 21 directory: "/" schedule: interval: "daily" - package-ecosystem: "maven" + cooldown: + default-days: 21 directory: "/" ignore: - dependency-name: "*" diff --git a/.github/linters/.jscpd.json b/.github/linters/.jscpd.json index eac61cc..78dfc78 100644 --- a/.github/linters/.jscpd.json +++ b/.github/linters/.jscpd.json @@ -1,3 +1,3 @@ { - "threshold": 4 -} \ No newline at end of file + "threshold": 6 +} diff --git a/.github/workflows/bearer.yaml b/.github/workflows/bearer.yaml new file mode 100644 index 0000000..751b177 --- /dev/null +++ b/.github/workflows/bearer.yaml @@ -0,0 +1,25 @@ +name: bearer + +on: + push: + branches-ignore: [main] + pull_request: + branches: [main] + +permissions: {} + +jobs: + rule_check: + permissions: + contents: read + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: Bearer + uses: bearer/bearer-action@v2 + with: + skip-rule: "java_lang_sqli,java_lang_observable_timing" diff --git a/.github/workflows/maven-darwin.yaml b/.github/workflows/maven-darwin.yaml new file mode 100644 index 0000000..ac01efd --- /dev/null +++ b/.github/workflows/maven-darwin.yaml @@ -0,0 +1,84 @@ +name: maven darwin + +on: + pull_request: + branches: [main] + schedule: + - cron: "15 7 * * *" + workflow_dispatch: + +permissions: {} + +jobs: + maven-darwin: + outputs: + status: ${{ job.status }} + permissions: + contents: read + pull-requests: write + runs-on: macos-latest + strategy: + fail-fast: false + matrix: + java-distribution: ["temurin"] + java-version: ["17", "21"] + senzingsdk-version: 
[production-v4, staging-v4] + + steps: + - name: checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + + - uses: actions/setup-java@v5 + with: + distribution: ${{ matrix.java-distribution }} + java-version: ${{ matrix.java-version }} + + - name: install Senzing SDK + uses: senzing-factory/github-action-install-senzing-sdk@v3 + with: + senzingsdk-version: ${{ matrix.senzingsdk-version }} + + - name: configure environment + run: | + SENZING_PATH="${HOME}/senzing" + { + echo "SENZING_PATH=${SENZING_PATH}" + echo "DYLD_LIBRARY_PATH=${SENZING_PATH}/er/lib:${SENZING_PATH}/er/lib/macos" + } >> "$GITHUB_ENV" + + - name: Build with Maven + run: | + java -jar "$SENZING_PATH"/er/sdk/java/sz-sdk.jar -x + mvn clean install -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -Pjacoco -Djacoco.haltOnFailure=false -Djacoco.ignoreFailure=true + + - name: Jacoco Report to PR + id: jacoco + uses: madrapps/jacoco-report@v1.7.2 + with: + fail-emoji: ":red_circle:" + min-coverage-changed-files: 10 + min-coverage-overall: 10 + pass-emoji: ":green_circle:" + paths: ${{ github.workspace }}/target/site/**/*.xml + skip-if-no-changes: true + title: Code Coverage + token: ${{ secrets.GITHUB_TOKEN }} + update-comment: true + + - name: Fail PR if overall coverage is less than 10% + if: ${{ steps.jacoco.outputs.coverage-overall < 10.0 }} + uses: actions/github-script@v8 + with: + script: | + core.setFailed('Overall coverage is less than 10%!') + + slack-notification: + needs: [maven-darwin] + if: ${{ always() && contains(fromJSON('["failure", "cancelled"]'), needs.maven-darwin.outputs.status ) && github.event_name == 'schedule' }} + secrets: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + uses: senzing-factory/build-resources/.github/workflows/build-failure-slack-notification.yaml@v3 + with: + job-status: ${{ needs.maven-darwin.outputs.status }} diff --git a/.github/workflows/maven-dependency-review.yaml 
b/.github/workflows/maven-dependency-review.yaml new file mode 100644 index 0000000..a617a95 --- /dev/null +++ b/.github/workflows/maven-dependency-review.yaml @@ -0,0 +1,27 @@ +name: maven dependency review + +on: + pull_request: + branches: [main] + +permissions: {} + +jobs: + dependency-submission-maven: + permissions: + contents: read + runs-on: ubuntu-latest + strategy: + matrix: + include: + - java-version: "17" + java-distribution: "temurin" + + steps: + - name: checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + + - name: "Dependency Review" + uses: actions/dependency-review-action@v4 diff --git a/.github/workflows/maven-linux.yaml b/.github/workflows/maven-linux.yaml new file mode 100644 index 0000000..758db13 --- /dev/null +++ b/.github/workflows/maven-linux.yaml @@ -0,0 +1,81 @@ +name: maven linux + +on: + push: + branches-ignore: [main] + pull_request: + branches: [main] + schedule: + - cron: "15 7 * * *" + +permissions: {} + +jobs: + maven-linux: + outputs: + status: ${{ job.status }} + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + java-version: ["17", "21"] + java-distribution: ["temurin"] + senzingsdk-version: [production-v4, staging-v4] + + steps: + - name: checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + + - uses: actions/setup-java@v5 + with: + java-version: ${{ matrix.java-version }} + distribution: ${{ matrix.java-distribution }} + + - name: install Senzing runtime + uses: senzing-factory/github-action-install-senzing-sdk@v3 + with: + packages-to-install: "senzingsdk-runtime senzingsdk-setup" + senzingsdk-version: ${{ matrix.senzingsdk-version }} + + - name: Build with Maven + env: + SENZING_PATH: "/opt/senzing" + run: | + java -jar "$SENZING_PATH"/er/sdk/java/sz-sdk.jar -x + mvn clean install -Pcheckstyle,jacoco,spotbugs -Djacoco.haltOnFailure=false -Djacoco.ignoreFailure=true 
-Dsenzing.support.dir="/opt/senzing/data" -B -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn + + - name: Jacoco Report to PR + id: jacoco + if: github.event_name == 'pull_request' + uses: madrapps/jacoco-report@v1.7.2 + with: + paths: ${{ github.workspace }}/target/site/**/*.xml + token: ${{ secrets.GITHUB_TOKEN }} + min-coverage-overall: 10 + min-coverage-changed-files: 10 + title: Code Coverage + update-comment: true + skip-if-no-changes: true + pass-emoji: ":green_circle:" + fail-emoji: ":red_circle:" + + - name: Fail PR if overall coverage is less than 10% + if: ${{ github.event_name == 'pull_request' && steps.jacoco.outputs.coverage-overall < 10.0 }} + uses: actions/github-script@v8 + with: + script: | + core.setFailed('Overall coverage is less than 10%!') + + slack-notification: + needs: [maven-linux] + if: ${{ always() && contains(fromJSON('["failure", "cancelled"]'), needs.maven-linux.outputs.status ) && github.event_name == 'schedule' }} + secrets: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + uses: senzing-factory/build-resources/.github/workflows/build-failure-slack-notification.yaml@v3 + with: + job-status: ${{ needs.maven-linux.outputs.status }} diff --git a/.github/workflows/maven-windows.yaml b/.github/workflows/maven-windows.yaml new file mode 100644 index 0000000..4d95431 --- /dev/null +++ b/.github/workflows/maven-windows.yaml @@ -0,0 +1,81 @@ +name: maven windows + +on: + pull_request: + branches: [main] + schedule: + - cron: "15 7 * * *" + workflow_dispatch: + +permissions: {} + +jobs: + maven-windows: + outputs: + status: ${{ job.status }} + permissions: + contents: read + pull-requests: write + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + java-version: ["17", "21"] + java-distribution: ["temurin"] + senzingsdk-version: [production-v4, staging-v4] + + steps: + - name: checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + + - uses: 
actions/setup-java@v5 + with: + java-version: ${{ matrix.java-version }} + distribution: ${{ matrix.java-distribution }} + + - name: install Senzing SDK + uses: senzing-factory/github-action-install-senzing-sdk@v3 + with: + senzingsdk-version: ${{ matrix.senzingsdk-version }} + + - name: Add to "Path" environment variable + run: | + Add-Content $env:GITHUB_PATH "$Env:USERPROFILE\Senzing\er\lib" + + - name: Build with Maven + run: | + $Env:SENZING_PATH = "$Env:USERPROFILE\senzing" + java -jar "$Env:SENZING_PATH\er\sdk\java\sz-sdk.jar" -x + mvn clean install -B "-Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn" -Pjacoco "-Djacoco.haltOnFailure=false" "-Djacoco.ignoreFailure=true" "-Dsenzing.path=$Env:USERPROFILE\Senzing" + + - name: Jacoco Report to PR + id: jacoco + uses: madrapps/jacoco-report@v1.7.2 + with: + paths: ${{ github.workspace }}/target/site/**/*.xml + token: ${{ secrets.GITHUB_TOKEN }} + min-coverage-overall: 10 + min-coverage-changed-files: 10 + title: Code Coverage + update-comment: true + skip-if-no-changes: true + pass-emoji: ":green_circle:" + fail-emoji: ":red_circle:" + + - name: Fail PR if overall coverage is less than 10% + if: ${{ steps.jacoco.outputs.coverage-overall < 10.0 }} + uses: actions/github-script@v8 + with: + script: | + core.setFailed('Overall coverage is less than 10%!') + + slack-notification: + needs: [maven-windows] + if: ${{ always() && contains(fromJSON('["failure", "cancelled"]'), needs.maven-windows.outputs.status ) && github.event_name == 'schedule' }} + secrets: + SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} + uses: senzing-factory/build-resources/.github/workflows/build-failure-slack-notification.yaml@v3 + with: + job-status: ${{ needs.maven-windows.outputs.status }} diff --git a/.gitignore b/.gitignore index d13978d..5046800 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ target # Visual Studio code .vscode/* !.vscode/cspell.json +!.vscode/settings.json 
*.code-workspace .history .project diff --git a/.vscode/cspell.json b/.vscode/cspell.json index f9dca27..0b52206 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -2,87 +2,92 @@ "version": "0.2", "language": "en", "words": [ - "aarch", - "aayushatharva", - "alnum", - "alse", - "amqp", - "amqps", - "apidocs", - "armeria", - "awssdk", - "backgrounding", - "BIGSERIAL", - "boringssl", - "Caceres", - "CCLA", - "cmdline", - "CODEOWNER", - "CONFIGFILE", - "databind", - "datamart", - "Dgit", - "Dproject", - "Dskip", - "Dtest", - "dsrc", - "Entityid", - "epoll", - "errule", - "esbenp", - "fasterxml", - "findbugs", - "findsecbugs", - "FTYPE", - "glassfish", - "ICLA", - "inclusivity", - "interprocess", - "jacoco", - "javadocs", - "javassist", - "javax", - "jaxb", - "jdbc", - "joda", - "kqueue", - "linecorp", - "mkey", - "okey", - "ossrh", - "ounts", - "PHOO", - "PLPGSQL", - "PLPGSQL", - "PRNG", - "Postrgre", - "proguard", - "prin", - "PRNG", - "rabbitmq", - "rawtypes", - "reinit", - "sched", - "Senzing", - "sonatype", - "spotbugs", - "stackoverflow", - "STRFTIME", - "thirdparty", - "toplevel", - "unleased", - "upsert", - "uring", - "UNTRACKING", - "vhost", - "xerial", - "Xlint", - "xstream" - ], - "ignorePaths": [ - ".git/**", - ".gitignore", - "target/**", - ".claude/**" - ] + "aarch", + "aayushatharva", + "alnum", + "alse", + "amqp", + "amqps", + "apidocs", + "armeria", + "awssdk", + "backgrounding", + "BIGSERIAL", + "boringssl", + "Caceres", + "CCLA", + "cmdline", + "CODEOWNER", + "CONFIGFILE", + "cooldown", + "databind", + "datamart", + "Dgit", + "Djacoco", + "Dorg", + "Dproject", + "Dsenzing", + "Dskip", + "dsrc", + "Dtest", + "DYLD", + "Entityid", + "epoll", + "errule", + "esbenp", + "fasterxml", + "findbugs", + "findsecbugs", + "FTYPE", + "glassfish", + "ICLA", + "inclusivity", + "interprocess", + "jacoco", + "javadocs", + "javassist", + "javax", + "jaxb", + "jdbc", + "joda", + "kqueue", + "linecorp", + "madrapps", + "mkey", + "okey", + "ossrh", + "ounts", 
+ "Pcheckstyle", + "PHOO", + "Pjacoco", + "PLPGSQL", + "Postrgre", + "prin", + "PRNG", + "proguard", + "rabbitmq", + "rawtypes", + "reinit", + "sched", + "Senzing", + "senzingsdk", + "sonatype", + "spotbugs", + "sqli", + "stackoverflow", + "STRFTIME", + "temurin", + "thirdparty", + "toplevel", + "unleased", + "UNTRACKING", + "upsert", + "uring", + "USERPROFILE", + "vhost", + "xerial", + "Xlint", + "xstream" + ], + "ignorePaths": [".git/**", ".gitignore", "target/**", ".claude/**"] } diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 0000000..fc7ffbd --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,12 @@ +{ + "[json]": { + "editor.defaultFormatter": "esbenp.prettier-vscode" + }, + "[markdown]": { + "editor.defaultFormatter": "esbenp.prettier-vscode" + }, + "[yaml]": { + "editor.defaultFormatter": "esbenp.prettier-vscode" + }, + "editor.formatOnSave": false +} diff --git a/checkstyle-suppressions.xml b/checkstyle-suppressions.xml index b597970..c9d7b2e 100644 --- a/checkstyle-suppressions.xml +++ b/checkstyle-suppressions.xml @@ -16,4 +16,5 @@ + diff --git a/data-mart-reports.yaml b/data-mart-reports.yaml index 9ec8a52..43ff58f 100644 --- a/data-mart-reports.yaml +++ b/data-mart-reports.yaml @@ -1,3 +1,5 @@ +# checkov:skip=CKV_OPENAPI_4:Public API - no auth required +# checkov:skip=CKV_OPENAPI_5:Public API - no auth required openapi: 3.0.1 info: title: Senzing Data Mart Reports REST API @@ -32,7 +34,7 @@ paths: data mart. description: >- Gets the entity and record counts in total and by data source from the - data mart. *NOTE*: Data mart statistics may be slightly delayed from + data mart. *NOTE*: Data mart statistics may be slightly delayed from the entity repository. operationId: getLoadedStatistics parameters: @@ -61,7 +63,7 @@ paths: data mart. description: >- Gets the entity and record counts for a specific data source from the - data mart. If no records have been loaded for the data source then + data mart. 
If no records have been loaded for the data source then this returns a 200 OK response with counts of zero. However, if the data source is not recognized then a 404 Not Found response is returned. *NOTE*: Data mart statistics may be slightly delayed from the entity @@ -98,7 +100,7 @@ paths: Gets the entity ID's of the entities having at least one record from the respective data source. If no records have been loaded for that data source then this will return a 200 OK response that will have an empty array of - entity ID's rather than giving a 404 Not Found response. However, the data + entity ID's rather than giving a 404 Not Found response. However, the data source code in the path is not configured then this will return a 404 Not Found response. *NOTE*: Data mart statistics may be slightly delayed from the entity repository. @@ -124,7 +126,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not strictly greater-than** the `sampleSize`. "404": description: >- @@ -138,7 +140,7 @@ paths: summary: >- Gets the entity counts by entity size (Entity Size Breakdown). description: >- - Gets the number of entities in the repository for each entity size + Gets the number of entities in the repository for each entity size that exists. *NOTE*: Data mart statistics may be slightly delayed from the entity repository. operationId: getEntitySizeBreakdown @@ -225,7 +227,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "500": description: If a server-side error occurred. 
@@ -234,7 +236,7 @@ paths: tags: - Statistics summary: >- - Gets the entity counts by the number of entity relations (Entity + Gets the entity counts by the number of entity relations (Entity Relation Breakdown). description: >- Gets the number of entities in the repository for each number of entity @@ -324,7 +326,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "500": description: If a server-side error occurred. @@ -333,7 +335,7 @@ paths: tags: - Statistics summary: >- - Gets the summary statistics for each data source versus every other + Gets the summary statistics for each data source versus every other data source including itself. description: >- Gets the summary statistics for each data source versus every other @@ -433,7 +435,7 @@ paths: summary: >- Gets the cross-summary statistics for matches for entities having at least one record from a primary data source and at least one **other** - record from another data source (which may be the same data source), + record from another data source (which may be the same data source), optionally for one or more combination of match key and principle. description: >- Gets the cross-summary statistics for matches between a primary data @@ -624,15 +626,15 @@ paths: first data source and at least one other record from the versus data source. description: >- - Gets the entity ID's of the entities having at least one record from the + Gets the entity ID's of the entities having at least one record from the first data source and at least one record from the versus data source. 
If no entities have at least one record from the first data source and at least - one other record from the versus data source then this will return a 200 OK + one other record from the versus data source then this will return a 200 OK response that will have an empty array of entity ID's rather than giving a 404 Not Found response. Further, if there are no entity ID's for entities satisfying the bound conditions of the request then similarly a 200 OK response is returned but the array of ID's contained in the response will be - empty. However, if either data source code is not found in the configuration + empty. However, if either data source code is not found in the configuration then a 404 Not Found response is returned. *NOTE*: Data mart statistics may be slightly delayed from the entity repository. operationId: getEntityIdsForCrossMatches @@ -660,7 +662,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -712,7 +714,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -764,7 +766,7 @@ paths: $ref: "#/components/schemas/SzRelationsPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. 
"404": description: >- @@ -816,7 +818,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -868,7 +870,7 @@ paths: $ref: "#/components/schemas/SzRelationsPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -887,7 +889,7 @@ paths: Gets the entity ID's of the entities having at least one record from the first data source and a possible relationship to an entity having at least one record from the versus data source. If there are no such entities then - this will return a 200 OK response that will have an empty array of + this will return a 200 OK response that will have an empty array of entity ID values rather than giving a 404 Not Found response. Further, if there are no entities satisfying the bound conditions of the request then similarly a 200 OK response is returned but the entity ID array contained @@ -919,7 +921,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -944,7 +946,7 @@ paths: bound conditions of the request then similarly a 200 OK response is returned but the `SzRelation` array contained in the response will be empty. However, if either data source code is not found in the configuration then a 404 Not - Found response is returned. *NOTE*: Data mart statistics may be slightly + Found response is returned. 
*NOTE*: Data mart statistics may be slightly delayed from the entity repository. operationId: getPossiblyCrossRelatedRelations parameters: @@ -971,7 +973,7 @@ paths: $ref: "#/components/schemas/SzRelationsPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -990,7 +992,7 @@ paths: Gets the entity ID's of the entities having at least one record from the first data source and a disclosed relationship to an entity having at least one record from the versus data source. If there are no such entities then - this will return a 200 OK response that will have an empty array of + this will return a 200 OK response that will have an empty array of entity ID values rather than giving a 404 Not Found response. Further, if there are no entities satisfying the bound conditions of the request then similarly a 200 OK response is returned but the entity ID array contained @@ -1022,7 +1024,7 @@ paths: $ref: "#/components/schemas/SzEntitiesPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -1047,7 +1049,7 @@ paths: bound conditions of the request then similarly a 200 OK response is returned but the `SzRelation` array contained in the response will be empty. However, if either data source code is not found in the configuration then a 404 Not - Found response is returned. *NOTE*: Data mart statistics may be slightly + Found response is returned. *NOTE*: Data mart statistics may be slightly delayed from the entity repository. 
operationId: getDisclosedCrossRelatedRelations parameters: @@ -1074,7 +1076,7 @@ paths: $ref: "#/components/schemas/SzRelationsPage" "400": description: >- - If the `sampleSize` and `pageSize` parameter are both specified + If the `sampleSize` and `pageSize` parameter are both specified and the `pageSize` is **not** strictly greater-than the `sampleSize`. "404": description: >- @@ -1107,7 +1109,7 @@ components: required: false description: >- The optional parameter to specify a principle for which the statistics - are being requested. Specify an asterisk (`*`) if the statistics for + are being requested. Specify an asterisk (`*`) if the statistics for all principles should be returned. schema: type: string @@ -1130,7 +1132,7 @@ components: required: false description: >- The optional parameter to specify a match key for which the statistics - are being requested. Specify an asterisk (`*`) if the statistics for + are being requested. Specify an asterisk (`*`) if the statistics for all match keys should be returned. schema: type: string @@ -1158,7 +1160,7 @@ components: If not specified, then the default value is taken as `0` if a `boundType` of `INCLUSIVE_LOWER` or `EXCLUSIVE_LOWER` is being used, otherwise for a `boundType` of `INCLUSIVE_UPPER` or `EXCLUSIVE_UPPER` - it defaults to `max` (indicating the maximum legal value of an + it defaults to `max` (indicating the maximum legal value of an entity ID). schema: type: string @@ -1169,15 +1171,15 @@ components: description: >- The optional parameter to provide for "paging" through relationships associated with a statistic. The value is specified as a bound on the - entity ID's in the `SzRelation` values returned and contains two + entity ID's in the `SzRelation` values returned and contains two entity ID values separated by a colon (e.g.: `1000:5500`). Either of the entity ID's can be specified as `max` to indicate the maximum legal value of an entity ID (e.g.: `max:max` or `1000:max`). 
If `max` is specified by itself it is interpreted as `max:max`. The type of bound is given by the `boundType` parameter. For example, by default the `boundType` is `EXCLUSIVE_LOWER` so the returned `SzRelation` values - must be canonically greater than the relationship described by the - entity ID values encoded in the bound. `SzRelation` values are ordered + must be canonically greater than the relationship described by the + entity ID values encoded in the bound. `SzRelation` values are ordered on the first entity ID value and then on the second related entity ID value. To move to the "next page" of relationships specify the encoded value of the entity ID and related ID of the greatest (last) `SzRelation` @@ -1201,12 +1203,12 @@ components: required: false description: >- The optional parameter to provide for "paging" through results associated - with a statistic. The value is specified as an `SzBoundType` and the + with a statistic. The value is specified as an `SzBoundType` and the bound value is given by the `bound` parameter. For example, by default the `boundType` is `EXCLUSIVE_LOWER` so the returned values must satisfy the condition that they are strictly greater than the bound value. To move to the "next page" of results specify the greatest result value from the - current page and use `boundType` of `EXCLUSIVE_LOWER`. To move to the + current page and use `boundType` of `EXCLUSIVE_LOWER`. To move to the "previous page" of results specify the least result value on the current page and use a `boundType` of `EXCLUSIVE_UPPER`. To change the number of results shown on the current page specify the least result value on the @@ -1215,18 +1217,19 @@ components: * `INCLUSIVE_LOWER` - The bound represents an inclusive lower bound whereby values satisfying the bound will be greater-than or equal to the value associated with the bound. 
- * `EXCLUSIVE_LOWER` - The bound represents an exclusive lower bound whereby - values satisfying the bound will be strictly greater-than + * `EXCLUSIVE_LOWER` - The bound represents an exclusive lower bound whereby + values satisfying the bound will be strictly greater-than the value associated with the bound. * `INCLUSIVE_UPPER` - The bound represents an inclusive upper bound whereby values satisfying the bound will be less-than or equal to the value associated with the bound. - * `EXCLUSIVE_UPPER` - The bound represents an exclusive upper bound whereby - values satisfying the bound will be strictly less-than + * `EXCLUSIVE_UPPER` - The bound represents an exclusive upper bound whereby + values satisfying the bound will be strictly less-than the value associated with the bound. schema: + allOf: + - $ref: "#/components/schemas/SzBoundType" default: EXCLUSIVE_LOWER - $ref: "#/components/schemas/SzBoundType" pageSizeQueryParam: in: query name: pageSize @@ -1234,13 +1237,13 @@ components: description: >- The optional parameter to limit the number of returned results per page. If not specified then a default value is determined. If the `sampleSize` - parameter is provided, then this defaults to 20 times the value of the + parameter is provided, then this defaults to 20 times the value of the `sampleSize` parameter. If the `sampleSize` parameter is **not** provided - then this simply defaults to `100`. If using the `sampleSize` query - parameter and specifying a value for the `pageSize` then the `pageSize` + then this simply defaults to `100`. If using the `sampleSize` query + parameter and specifying a value for the `pageSize` then the `pageSize` should be greater than the `sampleSize` by at least an order of magnitude in order tp provide an adequate set of results from which to choose a random - sample. For example, a `pageSize` of `1000` would be appropriate for a + sample. For example, a `pageSize` of `1000` would be appropriate for a `sampleSize` of `50` or `100`. 
schema: type: integer @@ -1251,9 +1254,9 @@ components: name: sampleSize required: false description: >- - The optional parameter to extract a sample of this size from the page of + The optional parameter to extract a sample of this size from the page of results being returned. When using this parameter the caller should - typically use a much larger page size (e.g.: 1000 or 5000) and pull a + typically use a much larger page size (e.g.: 1000 or 5000) and pull a random sample (e.g.: 50 to 100). To obtain another sample with no overlap with the previous sample(s), simply change the `bound` and `boundType` parameter so the eligible results for the page are outside the range @@ -1335,14 +1338,15 @@ components: format: int64 nullable: false boundType: + allOf: + - $ref: "#/components/schemas/SzBoundType" description: >- The `SzBoundType` associated with the `bound` value to describe how the bound was applied. - $ref: "#/components/schemas/SzBoundType" nullable: false pageSize: description: >- - The requested page size representing the maximum number of + The requested page size representing the maximum number of entities that were included in the page. type: integer format: int32 @@ -1370,9 +1374,9 @@ components: nullable: true pageMinimumValue: description: >- - The minimum entity ID of the entire entity page. This will - be the same as `minimumValue` if `sampleSize` was not - specified, however, if `sampleSize` was specified then this + The minimum entity ID of the entire entity page. This will + be the same as `minimumValue` if `sampleSize` was not + specified, however, if `sampleSize` was specified then this will be the minimum entity ID value of all the candidate entities on the page that were used for random sample selection even if that entity was not randomly selected. **NOTE:** This @@ -1382,9 +1386,9 @@ components: nullable: true pageMaximumValue: description: >- - The maximum entity ID of the entire entity page. 
This will - be the same as `maximumValue` if `sampleSize` was not - specified, however, if `sampleSize` was specified then this + The maximum entity ID of the entire entity page. This will + be the same as `maximumValue` if `sampleSize` was not + specified, however, if `sampleSize` was specified then this will be the maximum entity ID value of all the candidate entities on the page that were used for random sample selection even if that entity was not randomly selected. **NOTE:** This @@ -1394,14 +1398,14 @@ components: nullable: true totalEntityCount: description: >- - The total number of entities representing the set of all + The total number of entities representing the set of all possible results across all pages. type: integer format: int64 nullable: false beforePageCount: description: >- - The number of entities in the set that exist on pages before + The number of entities in the set that exist on pages before this page. type: integer format: int64 @@ -1418,6 +1422,7 @@ components: An array of `SzEntity` instances describing the entities for the page. The array will be in ascending order of entity ID. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzEntity" SzRelationsPage: @@ -1438,19 +1443,20 @@ components: description: >- The relationship bound value that contains two (2) entity ID values separated by a colon (e.g.: `1000:5005`). The first entity ID value - identifies the first entity in the relationship and the second entity + identifies the first entity in the relationship and the second entity ID value identifies the related entity in the relationship. type: string nullable: false boundType: + allOf: + - $ref: "#/components/schemas/SzBoundType" description: >- - The `SzBoundType` associated with the `entityBound` and + The `SzBoundType` associated with the `entityBound` and `relatedBound` values to describe how the bound was applied. 
- $ref: "#/components/schemas/SzBoundType" nullable: false pageSize: description: >- - The requested page size representing the maximum number of + The requested page size representing the maximum number of `SzRelation`'s' that were included in the page. type: integer format: int32 @@ -1480,7 +1486,7 @@ components: values separated by a colon (e.g.: `1000:5005`). The first entity ID value identifies the greatest value of first entity in the relationship and the second entity ID value identifies - the greatest value of those entity ID's related to the first + the greatest value of those entity ID's related to the first entity. **NOTE:** This field is absent or `null` if there are no results. type: string @@ -1488,8 +1494,8 @@ components: pageMinimumValue: description: >- The minimum relation value of the entire relations page. This - will be the same as `minimumValue` if `sampleSize` was not - specified, however, if `sampleSize` was specified then this + will be the same as `minimumValue` if `sampleSize` was not + specified, however, if `sampleSize` was specified then this will be the minimum relation value of all the candidate relations on the page that were used for random sample selection even if that relation was not randomly selected. This is @@ -1504,8 +1510,8 @@ components: pageMaximumValue: description: >- The maximum relation value of the entire relations page. This - will be the same as `maximumValue` if `sampleSize` was not - specified, however, if `sampleSize` was specified then this + will be the same as `maximumValue` if `sampleSize` was not + specified, however, if `sampleSize` was specified then this will be the maximum relation value of all the candidate relations on the page that were used for random sample selection even if that relation was not randomly selected. 
This is @@ -1519,7 +1525,7 @@ components: nullable: true totalRelationCount: description: >- - The total number of relationships representing the set of all + The total number of relationships representing the set of all possible results across all pages. type: integer format: int64 @@ -1541,9 +1547,10 @@ components: relations: description: >- An array of `SzRelation`'s describing the relationships for the page. - The `SzRelation` array will be in ascending order of the first + The `SzRelation` array will be in ascending order of the first entity ID and then the second related entity ID. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelation" SzLoadedStats: @@ -1565,14 +1572,14 @@ components: nullable: false totalEntityCount: description: >- - The total number of entities that have been resolved from + The total number of entities that have been resolved from the loaded records. type: integer format: int64 nullable: false totalUnmatchedRecordCount: description: >- - The total number of records that did not match against any + The total number of records that did not match against any other records and belong to singleton entities. This doubles as the count of all entities that only contain a single record. type: integer @@ -1583,6 +1590,7 @@ components: An array of `SzSourceLoadedStats` describing the entity and record counts by data source. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzSourceLoadedStats" SzSourceLoadedStats: @@ -1610,7 +1618,7 @@ components: nullable: false entityCount: description: >- - The number of entities that have at least one record from the + The number of entities that have at least one record from the associated data source. type: integer format: int64 @@ -1618,7 +1626,7 @@ components: unmatchedRecordCount: description: >- The total number of records that have been loaded for the associated - data source that did *NOT* match against any other records. 
This + data source that did *NOT* match against any other records. This represents the number of entities having a record from this data source where that is the *ONLY* (single) record in the entity. type: integer @@ -1678,10 +1686,11 @@ components: description: >- The array of `EntitySizeCount` instances describing the number of entities having each number of composite records. If there - are no entities having a specific composite record count then + are no entities having a specific composite record count then no entry is included in the array for that entity size. The array will be in descending order of entity size. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzEntitySizeCount" SzEntityRelationsBreakdown: @@ -1696,10 +1705,11 @@ components: description: >- The array of `EntityRelationsCount` instances describing the number of entities having each distinct number of entity relations. If - there are no entities having a specific entity relations count then + there are no entities having a specific entity relations count then no entry is included in the array for that number of entity relations. The array will be in descending order of number of entity relations. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzEntityRelationsCount" SzMatchCounts: @@ -1736,7 +1746,7 @@ components: SzRelationCounts: description: >- Describes the entity, record and relationship counts for the respective relation - type for entities having at least one record from the primary data source to + type for entities having at least one record from the primary data source to entities having at least one record from the "versus" data source. 
type: object properties: @@ -1755,7 +1765,7 @@ components: entityCount: description: >- The number of entities having at least one record from the primary data - source related by a relationship of the respective relationship type to + source related by a relationship of the respective relationship type to an entity with at least one record from the "versus" data source. type: integer format: int64 @@ -1796,14 +1806,15 @@ components: type: string matches: description: >- - The array of `SzMatchCounts` describing the entity and record counts - associated with matches from records of the primary data source to at - least one record of the "versus" data source for each combination of + The array of `SzMatchCounts` describing the entity and record counts + associated with matches from records of the primary data source to at + least one record of the "versus" data source for each combination of match key and principle that were requested. If all were requested, this includes the cases where either or both of the match key and principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzMatchCounts" ambiguousMatches: @@ -1817,6 +1828,7 @@ components: of the match key and principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelationCounts" possibleMatches: @@ -1830,6 +1842,7 @@ components: match key and principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelationCounts" possibleRelations: @@ -1843,6 +1856,7 @@ components: match key and principle are absent or `null` indicating tracking across all match keys and/or principles. 
type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelationCounts" disclosedRelations: @@ -1856,6 +1870,7 @@ components: match key and principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelationCounts" SzCrossSourceMatchCounts: @@ -1878,14 +1893,15 @@ components: type: string counts: description: >- - The array of `SzMatchCounts` describing the entity and record counts - associated with matches from records of the primary data source to at - least one record of the "versus" data source for each combination of + The array of `SzMatchCounts` describing the entity and record counts + associated with matches from records of the primary data source to at + least one record of the "versus" data source for each combination of match key and principle that were requested. If all were requested, this includes the cases where either or both of the match key and principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzMatchCounts" SzCrossSourceRelationCounts: @@ -1907,14 +1923,15 @@ components: The versus data source in the cross comparison. type: string relationType: + allOf: + - $ref: "#/components/schemas/SzRelationType" description: >- The `SzRelationType` describing the type of relationship match for the returned statistics. 
- $ref: "#/components/schemas/SzRelationType" counts: description: >- The array of `SzRelationCounts` describing the entity, record and - relationship counts associated with the relationships of the + relationship counts associated with the relationships of the associated `relationType` from entities having at least one record from the primary data source to entities having at least at least one record from the "versus" data source for each combination of @@ -1923,6 +1940,7 @@ components: principle are absent or `null` indicating tracking across all match keys and/or principles. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRelationCounts" SzSourceSummary: @@ -1957,6 +1975,7 @@ components: statistics between the associated data source versus every data source (including itself). type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzCrossSourceSummary" SzSummaryStats: @@ -1969,6 +1988,7 @@ components: The array of `SzSourceSummary` instances describing the summary statistics for every configured data source. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzSourceSummary" SzRecord: @@ -1983,7 +2003,7 @@ components: properties: dataSource: description: >- - The data source code identifying the data source from + The data source code identifying the data source from which the record was loaded. type: string recordId: @@ -2039,6 +2059,7 @@ components: description: >- The array of `SzRecord` describing the records for this entity. type: array + maxItems: 100000 items: $ref: "#/components/schemas/SzRecord" SzRelation: @@ -2050,17 +2071,20 @@ components: - relatedEntity properties: entity: + allOf: + - $ref: "#/components/schemas/SzEntity" description: >- The entity describing the first entity in the relationship. - $ref: "#/components/schemas/SzEntity" relatedEntity: + allOf: + - $ref: "#/components/schemas/SzEntity" description: >- The entity describing the second entity in the relationship. 
- $ref: "#/components/schemas/SzEntity" matchType: + allOf: + - $ref: "#/components/schemas/SzRelationType" description: >- The `SzRelationType` describing the type of the relationship. - $ref: "#/components/schemas/SzRelationType" matchKey: description: >- The match key describing what features matched between @@ -2078,14 +2102,14 @@ components: * `INCLUSIVE_LOWER` - The bound represents an inclusive lower bound whereby values satisfying the bound will be greater-than or equal to the value associated with the bound. - * `EXCLUSIVE_LOWER` - The bound represents an exclusive lower bound whereby - values satisfying the bound will be strictly greater-than + * `EXCLUSIVE_LOWER` - The bound represents an exclusive lower bound whereby + values satisfying the bound will be strictly greater-than the value associated with the bound. * `INCLUSIVE_UPPER` - The bound represents an inclusive upper bound whereby values satisfying the bound will be less-than or equal to the value associated with the bound. - * `EXCLUSIVE_UPPER` - The bound represents an exclusive upper bound whereby - values satisfying the bound will be strictly less-than + * `EXCLUSIVE_UPPER` - The bound represents an exclusive upper bound whereby + values satisfying the bound will be strictly less-than the value associated with the bound. 
type: string enum: diff --git a/pom.xml b/pom.xml index 8d2bd00..20974c6 100644 --- a/pom.xml +++ b/pom.xml @@ -42,7 +42,14 @@ 2.20.1 pom import - + + + io.netty + netty-bom + 4.2.8.Final + pom + import + @@ -61,37 +68,31 @@ senzing-commons 4.0.0-beta.1.6 - - com.google.code.findbugs - jsr305 - 3.0.2 - provided - - - com.linecorp.armeria - armeria - - - io.netty - netty-transport-native-epoll - - - io.netty - netty-transport-native-kqueue - - - - io.netty - netty-resolver-dns-native-macos - - - io.netty.incubator - netty-incubator-transport-native-io_uring - - + + com.linecorp.armeria + armeria + + + io.netty + netty-transport-native-epoll + + + io.netty + netty-transport-native-kqueue + + + + io.netty + netty-resolver-dns-native-macos + + + io.netty.incubator + netty-incubator-transport-native-io_uring + + org.xerial diff --git a/src/main/java/com/senzing/datamart/SzReplicator.java b/src/main/java/com/senzing/datamart/SzReplicator.java index b923f3c..4d51fd7 100644 --- a/src/main/java/com/senzing/datamart/SzReplicator.java +++ b/src/main/java/com/senzing/datamart/SzReplicator.java @@ -27,7 +27,6 @@ import javax.json.JsonObject; import javax.json.JsonObjectBuilder; -import java.io.File; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Method; @@ -211,7 +210,7 @@ protected static void commandLineStart(String[] args, CommandLineParser cmdLineP System.err.println(); System.err.println(e.getMessage()); System.err.println(); - e.printStackTrace(); + System.err.println(formatStackTrace(e.getStackTrace())); } System.exit(1); } @@ -245,7 +244,8 @@ protected static void commandLineStart(String[] args, CommandLineParser cmdLineP } } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); exitOnError(e); } @@ -284,7 +284,8 @@ public void run() { } } } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + 
System.err.println(formatStackTrace(e.getStackTrace())); } finally { this.messageConsumer.destroy(); @@ -314,11 +315,11 @@ public void shutdown() { } /** - * Gets the {@link Map} of {@link Statistic} keys to {@link Number} - * values for this instance. + * Gets the {@link Map} of {@link Statistic} keys to {@link Number} values for + * this instance. * - * @return The {@link Map} of {@link Statistic} keys to {@link Number} - * values for this instance. + * @return The {@link Map} of {@link Statistic} keys to {@link Number} values + * for this instance. */ public Map getStatistics() { Map stats = new LinkedHashMap<>(); @@ -350,8 +351,8 @@ private void printStatistics() { } /** - * Formats and prints the specified {@link Map} of {@link Statistic} keys - * to {@link Number} values. + * Formats and prints the specified {@link Map} of {@link Statistic} keys to + * {@link Number} values. * * @param stats The {@link Map} of {@link Statistic} keys to {@link Number} * values to print. @@ -473,12 +474,13 @@ protected static void printStandardOptionsUsage(PrintWriter pw) { " operations (i.e.: the number of engine threads). The number of", " threads for consuming messages and handling tasks is scaled based", " on the engine concurrency. If not specified, then this defaults to " - + DEFAULT_CORE_CONCURRENCY + ".", + + DEFAULT_CORE_CONCURRENCY + ".", " --> VIA ENVIRONMENT: " + CORE_CONCURRENCY.getEnvironmentVariable(), "", " --module-name ", " The module name to initialize with. If not specified, then the module", " name defaults to \"" + DEFAULT_INSTANCE_NAME + "\".", - " --> VIA ENVIRONMENT: " + CORE_INSTANCE_NAME.getEnvironmentVariable(), "", " --verbose [true|false]", + " --> VIA ENVIRONMENT: " + CORE_INSTANCE_NAME.getEnvironmentVariable(), "", + " --verbose [true|false]", " Also -verbose. If specified then initialize in verbose mode. 
The", " true/false parameter is optional, if not specified then true is assumed.", " If specified as false then it is the same as omitting the option with", @@ -531,14 +533,11 @@ protected static void printInfoQueueOptionsUsage(PrintWriter pw) { protected static void printDatabaseOptionsUsage(PrintWriter pw) { pw.println(multilineFormat("[ Data Mart Database Connectivity Options ]", " The following options pertain to configuring the connection to the data-mart", - " database. Exactly one such database must be configured.", - "", - " --sqlite-database-file ", + " database. Exactly one such database must be configured.", "", " --sqlite-database-file ", " Specifies an SQLite database file to open (or create) to use as the", " data-mart database. NOTE: SQLite may be used for testing, but because", " only one connection may be made, it will not scale for production use.", - " --> VIA ENVIRONMENT: " + DATABASE_URI.getEnvironmentVariable(), - "")); + " --> VIA ENVIRONMENT: " + DATABASE_URI.getEnvironmentVariable(), "")); } /** @@ -619,8 +618,7 @@ private static void exitOnError(Throwable t) { private SzEnvironment environment = null; /** - * The proxied {@link SzEnvironment} to prevent calling of - * {@link #destroy()}. + * The proxied {@link SzEnvironment} to prevent calling of {@link #destroy()}. */ private SzEnvironment proxyEnvironment = null; @@ -646,8 +644,8 @@ private static void exitOnError(Throwable t) { private SzReplicatorService replicatorService; /** - * Creates a new instance of {@link SzAutoCoreEnvironment} using - * the specified options. + * Creates a new instance of {@link SzAutoCoreEnvironment} using the specified + * options. * * @param options The {@link SzReplicatorOptions} to use. * @return The {@link SzAutoCoreEnvironment} that was created using the @@ -656,104 +654,86 @@ private static void exitOnError(Throwable t) { * @throws IllegalStateException If there is already an active instance of * {@link com.senzing.sdk.core.SzCoreEnvironment}. 
*/ - protected static SzAutoCoreEnvironment createSzAutoCoreEnvironment( - SzReplicatorOptions options) - throws IllegalStateException - { + protected static SzAutoCoreEnvironment createSzAutoCoreEnvironment(SzReplicatorOptions options) throws IllegalStateException { String settings = JsonUtilities.toJsonText(options.getCoreSettings()); String instanceName = options.getCoreInstanceName(); - + boolean verbose = (options.getCoreLogLevel() != 0); - + int concurrency = options.getCoreConcurrency(); - + long refreshSeconds = options.getRefreshConfigSeconds(); - Duration duration = (refreshSeconds < 0) - ? null : Duration.ofSeconds(refreshSeconds); - - return SzAutoCoreEnvironment.newAutoBuilder() - .concurrency(concurrency) - .configRefreshPeriod(duration) - .settings(settings) - .instanceName(instanceName) - .verboseLogging(verbose) - .build(); + Duration duration = (refreshSeconds < 0) ? null : Duration.ofSeconds(refreshSeconds); + + return SzAutoCoreEnvironment.newAutoBuilder().concurrency(concurrency).configRefreshPeriod(duration) + .settings(settings).instanceName(instanceName).verboseLogging(verbose).build(); } /** * Constructs an instance of {@link SzReplicator} with the specified - * {@link SzReplicatorOptions} instance. The server will be started - * upon construction. + * {@link SzReplicatorOptions} instance. The server will be started upon + * construction. * * NOTE: This will initialize the Senzing Core SDK via * {@link SzAutoCoreEnvironment} and only one active instance of - * {@link com.senzing.sdk.core.SzCoreEnvironment} is allowed in a - * process at any given time. + * {@link com.senzing.sdk.core.SzCoreEnvironment} is allowed in a process at any + * given time. * * @param options The {@link SzReplicatorOptions} instance with which to * construct the API server instance. * - * @throws IllegalStateException If another instance of Senzing Core SDK - * is already actively initialized. 
+ * @throws IllegalStateException If another instance of Senzing Core SDK is + * already actively initialized. * - * @throws Exception If a failure occurs. + * @throws Exception If a failure occurs. */ - public SzReplicator(SzReplicatorOptions options) - throws Exception - { + public SzReplicator(SzReplicatorOptions options) throws Exception { this(options, true); } /** * Constructs an instance of {@link SzReplicator} with the specified - * {@link SzReplicatorOptions} instance, optionally {@linkplain - * #start() starting} processing upon construction. + * {@link SzReplicatorOptions} instance, optionally {@linkplain #start() + * starting} processing upon construction. * - * @param options The {@link SzReplicatorOptions} instance with which to - * construct the API server instance. + * @param options The {@link SzReplicatorOptions} instance with which to + * construct the API server instance. * - * @param startProcessing true if processing should be started - * upon construction, otherwise false. + * @param startProcessing true if processing should be started upon + * construction, otherwise false. * * @throws Exception If a failure occurs. */ - public SzReplicator(SzReplicatorOptions options, boolean startProcessing) - throws Exception - { - this(createSzAutoCoreEnvironment(options), - true, - options, - startProcessing); + public SzReplicator(SzReplicatorOptions options, boolean startProcessing) throws Exception { + this(createSzAutoCoreEnvironment(options), true, options, startProcessing); } /** * Constructs an instance of {@link SzReplicator} with the specified - * {@link SzEnvironment} and {@link SzReplicatorOptions} instance. - * The constructed instance will not manage the specified - * {@link SzEnvironment} in that it will not attempt {@linkplain - * SzEnvironment#destroy() destroy} it upon destruction of this instance. + * {@link SzEnvironment} and {@link SzReplicatorOptions} instance. 
The + * constructed instance will not manage the specified + * {@link SzEnvironment} in that it will not attempt + * {@linkplain SzEnvironment#destroy() destroy} it upon destruction of this + * instance. * *

- * NOTE: Any of the {@linkplain SzReplicatorOptions options} - * specified pertaining to the creation of an {@link SzAutoCoreEnvironment} - * will be ignored. + * NOTE: Any of the {@linkplain SzReplicatorOptions options} specified + * pertaining to the creation of an {@link SzAutoCoreEnvironment} will be + * ignored. * - * @param environment The {@link SzEnvironment} to use. + * @param environment The {@link SzEnvironment} to use. * - * @param options The {@link SzReplicatorOptions} instance with which to - * construct the API server instance. + * @param options The {@link SzReplicatorOptions} instance with which to + * construct the API server instance. * - * @param startProcessing true if processing should be started - * upon construction, otherwise false. + * @param startProcessing true if processing should be started upon + * construction, otherwise false. * * @throws Exception If a failure occurs. */ - public SzReplicator(SzEnvironment environment, - SzReplicatorOptions options, - boolean startProcessing) - throws Exception - { + public SzReplicator(SzEnvironment environment, SzReplicatorOptions options, boolean startProcessing) + throws Exception { this(environment, false, options, startProcessing); } @@ -761,25 +741,21 @@ public SzReplicator(SzEnvironment environment, * Constructs an instance of {@link SzReplicator} with the specified * {@link SzReplicatorOptions} instance. * - * @param environment The {@link SzEnvironment} to use. + * @param environment The {@link SzEnvironment} to use. * - * @param manageEnv true if this instance should destroy the - * environment when done, otherwise false. + * @param manageEnv true if this instance should destroy the + * environment when done, otherwise false. * - * @param options The {@link SzReplicatorOptions} instance with which to - * construct the API server instance. + * @param options The {@link SzReplicatorOptions} instance with which to + * construct the API server instance. 
* - * @param startProcessing true if processing should be started - * upon construction, otherwise false. + * @param startProcessing true if processing should be started upon + * construction, otherwise false. * * @throws Exception If a failure occurs. */ - protected SzReplicator(SzEnvironment environment, - boolean manageEnv, - SzReplicatorOptions options, - boolean startProcessing) - throws Exception - { + protected SzReplicator(SzEnvironment environment, boolean manageEnv, SzReplicatorOptions options, boolean startProcessing) + throws Exception { // get the concurrency if (environment instanceof SzAutoEnvironment) { SzAutoEnvironment autoEnv = (SzAutoEnvironment) environment; @@ -795,25 +771,23 @@ protected SzReplicator(SzEnvironment environment, // set the environment this.environment = environment; - this.manageEnv = manageEnv; + this.manageEnv = manageEnv; // proxy the environment - this.proxyEnvironment = (SzEnvironment) - ReflectionUtilities.restrictedProxy(this.environment, DESTROY_METHOD); - + this.proxyEnvironment = (SzEnvironment) ReflectionUtilities.restrictedProxy(this.environment, DESTROY_METHOD); + // declare the scheduling service class (determine based on database type) String schedulingServiceClassName = null; // get the database URI ConnectionUri databaseUri = options.getDatabaseUri(); - + if (databaseUri instanceof SQLiteUri) { SQLiteUri sqliteUri = (SQLiteUri) databaseUri; Map connProps = sqliteUri.getQueryOptions(); - this.connector = (sqliteUri.isMemory()) - ? new SQLiteConnector(sqliteUri.getInMemoryIdentifier(), connProps) - : new SQLiteConnector(sqliteUri.getFile(), connProps); + this.connector = (sqliteUri.isMemory()) ? 
new SQLiteConnector(sqliteUri.getInMemoryIdentifier(), connProps) + : new SQLiteConnector(sqliteUri.getFile(), connProps); this.connPool = new ConnectionPool(this.connector, poolSize, maxPoolSize); @@ -822,16 +796,11 @@ protected SzReplicator(SzEnvironment environment, } else if (databaseUri instanceof PostgreSqlUri) { PostgreSqlUri postgreSqlUri = (PostgreSqlUri) databaseUri; - this.connector = new PostgreSqlConnector(postgreSqlUri.getHost(), - postgreSqlUri.getPort(), - postgreSqlUri.getDatabase(), - postgreSqlUri.getHost(), - postgreSqlUri.getPassword()); + this.connector = new PostgreSqlConnector(postgreSqlUri.getHost(), postgreSqlUri.getPort(), + postgreSqlUri.getDatabase(), postgreSqlUri.getHost(), postgreSqlUri.getPassword()); - this.connPool = new ConnectionPool(this.connector, - TransactionIsolation.READ_COMMITTED, - poolSize, - maxPoolSize); + this.connPool = new ConnectionPool(this.connector, TransactionIsolation.READ_COMMITTED, poolSize, + maxPoolSize); schedulingServiceClassName = PostgreSQLSchedulingService.class.getName(); } @@ -862,12 +831,12 @@ protected SzReplicator(SzEnvironment environment, this.replicatorService.init(replicatorJOB.build()); // build the message consumer - RabbitMqUri rabbitMqUri = options.getRabbitMqUri(); - SQSUri sqsUri = options.getSQSInfoUri(); - Boolean databaseQueue = options.isUsingDatabaseQueue(); - + RabbitMqUri rabbitMqUri = options.getRabbitMqUri(); + SQSUri sqsUri = options.getSQSInfoUri(); + Boolean databaseQueue = options.isUsingDatabaseQueue(); + JsonObjectBuilder consumerJOB = Json.createObjectBuilder(); - if (Boolean.TRUE.equals(databaseQueue)) { + if (Boolean.TRUE.equals(databaseQueue)) { this.queueRegistryName = TextUtilities.randomAlphanumericText(25); consumerJOB.add(SQLConsumer.CONNECTION_PROVIDER_KEY, this.connProviderName); consumerJOB.add(SQLConsumer.QUEUE_REGISTRY_NAME_KEY, this.queueRegistryName); @@ -877,14 +846,14 @@ protected SzReplicator(SzEnvironment environment, String queueName = 
options.getRabbitMqInfoQueue(); if (queueName == null) { throw new IllegalArgumentException( - "The RabbitMQ MQ must be specified if the RabbitMQ URI is provided."); + "The RabbitMQ MQ must be specified if the RabbitMQ URI is provided."); } consumerJOB.add(RabbitMQConsumer.CONCURRENCY_KEY, consumerConcurrency); consumerJOB.add(RabbitMQConsumer.MQ_HOST_KEY, rabbitMqUri.getHost()); consumerJOB.add(RabbitMQConsumer.MQ_USER_KEY, rabbitMqUri.getUser()); consumerJOB.add(RabbitMQConsumer.MQ_PASSWORD_KEY, rabbitMqUri.getPassword()); consumerJOB.add(RabbitMQConsumer.MQ_QUEUE_KEY, queueName); - + // check if we have the port parameter if (rabbitMqUri.hasPort()) { consumerJOB.add(RabbitMQConsumer.MQ_PORT_KEY, rabbitMqUri.getPort()); diff --git a/src/main/java/com/senzing/datamart/SzReplicatorOption.java b/src/main/java/com/senzing/datamart/SzReplicatorOption.java index a53f131..1562ce7 100644 --- a/src/main/java/com/senzing/datamart/SzReplicatorOption.java +++ b/src/main/java/com/senzing/datamart/SzReplicatorOption.java @@ -10,6 +10,7 @@ import static com.senzing.datamart.SzReplicatorConstants.*; import static com.senzing.io.IOUtilities.readTextFileAsString; +import static com.senzing.util.LoggingUtilities.formatStackTrace; import static com.senzing.util.LoggingUtilities.multilineFormat; /** @@ -53,14 +54,12 @@ public enum SzReplicatorOption implements CommandLineOptionCommand Line: --ignore-environment [true|false] * */ - IGNORE_ENVIRONMENT("--ignore-environment", null, - null, 0, "false"), + IGNORE_ENVIRONMENT("--ignore-environment", null, null, 0, "false"), /** *

- * Option for specifying the module name to initialize the Senzing API's - * with. The default value is {@link - * SzReplicatorConstants#DEFAULT_INSTANCE_NAME}. + * Option for specifying the module name to initialize the Senzing API's with. + * The default value is {@link SzReplicatorConstants#DEFAULT_INSTANCE_NAME}. *

* This option can be specified in the following ways: *

    @@ -69,16 +68,13 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_CORE_INSTANCE_NAME="{module-name}" *
*/ - CORE_INSTANCE_NAME("--core-instance-name", - ENV_PREFIX + "CORE_INSTANCE_NAME", - null, 1, DEFAULT_INSTANCE_NAME), + CORE_INSTANCE_NAME("--core-instance-name", ENV_PREFIX + "CORE_INSTANCE_NAME", null, 1, DEFAULT_INSTANCE_NAME), /** *

- * Option for specifying the core settings JSON with which to initialize - * the Core Senzing SDK. The parameter to this option should be the - * settings as a JSON object or the path to a file containing the - * settings JSON. + * Option for specifying the core settings JSON with which to initialize the + * Core Senzing SDK. The parameter to this option should be the settings as a + * JSON object or the path to a file containing the settings JSON. *

* This option can be specified in the following ways: *

    @@ -87,10 +83,8 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_CORE_SETTINGS="[{file-path}|{json-text}]" *
*/ - CORE_SETTINGS("--core-settings", - ENV_PREFIX + "CORE_SETTINGS", - List.of("SENZING_ENGINE_CONFIGURATION_JSON"), - true, 1), + CORE_SETTINGS("--core-settings", ENV_PREFIX + "CORE_SETTINGS", List.of("SENZING_ENGINE_CONFIGURATION_JSON"), true, + 1), /** *

@@ -104,15 +98,14 @@ public enum SzReplicatorOption implements CommandLineOptionEnvironment: SENZING_TOOLS_CORE_CONFIG_ID="{config-id}" * */ - CORE_CONFIG_ID("--core-config-id", - ENV_PREFIX + "CORE_CONFIG_ID", null, 1), + CORE_CONFIG_ID("--core-config-id", ENV_PREFIX + "CORE_CONFIG_ID", null, 1), /** *

- * This presence of this option determines if the Core Senzing SDK - * is initialized in verbose mode. The default value if not specified - * is muted (which is equivalent to zero). The parameter - * to this option may be specified as one of: + * This presence of this option determines if the Core Senzing SDK is + * initialized in verbose mode. The default value if not specified is + * muted (which is equivalent to zero). The parameter to this + * option may be specified as one of: *

    *
  • muted - To indicate no logging.
  • *
  • verbose - To indicate verbose logging.
  • @@ -128,16 +121,14 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_CORE_LOG_LEVEL="[muted|verbose|{integer}]" *
*/ - CORE_LOG_LEVEL("--core-log-level", - ENV_PREFIX + "CORE_LOG_LEVEL", null, - 0, "muted"), + CORE_LOG_LEVEL("--core-log-level", ENV_PREFIX + "CORE_LOG_LEVEL", null, 0, "muted"), /** *

- * This option sets the number of threads available for executing - * Core Senzing SDK functions. The single parameter to this option - * should be a positive integer. If not specified, then this - * defaults to {@link SzReplicatorConstants#DEFAULT_CORE_CONCURRENCY}, + * This option sets the number of threads available for executing Core Senzing + * SDK functions. The single parameter to this option should be a positive + * integer. If not specified, then this defaults to + * {@link SzReplicatorConstants#DEFAULT_CORE_CONCURRENCY}, *

* This option can be specified in the following ways: *

    @@ -146,61 +137,56 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_CORE_CONCURRENCY="{thread-count}" *
*/ - CORE_CONCURRENCY("--core-concurrency", - ENV_PREFIX + "CORE_CONCURRENCY", null, - 1, DEFAULT_CORE_CONCURRENCY_PARAM), + CORE_CONCURRENCY("--core-concurrency", ENV_PREFIX + "CORE_CONCURRENCY", null, 1, DEFAULT_CORE_CONCURRENCY_PARAM), - /** + /** *

* If leveraging the default configuration stored in the database, this option - * is used to specify how often the gRPC server should background check that - * the current active config is the same as the current default config and - * update the active config if not. The parameter to this option is specified - * as an integer: + * is used to specify how often the gRPC server should background check that the + * current active config is the same as the current default config and update + * the active config if not. The parameter to this option is specified as an + * integer: *

    *
  • A positive integer is interpreted as a number of seconds.
  • - *
  • If zero is specified, the auto-refresh is disabled and it will - * only occur when a requested configuration element is not found - * in the current active config.
  • - *
  • Specifying a negative integer is allowed but is used to enable - * a check and conditional refresh only when manually requested - * (programmatically).
  • + *
  • If zero is specified, the auto-refresh is disabled and it will only occur + * when a requested configuration element is not found in the current active + * config.
  • + *
  • Specifying a negative integer is allowed but is used to enable a check + * and conditional refresh only when manually requested (programmatically).
  • *
- * NOTE: This is option ignored if auto-refresh is disabled because - * the config was specified via the G2CONFIGFILE in the - * {@link #CORE_SETTINGS} or if {@link #CORE_CONFIG_ID} has been specified - * to lock in a specific configuration. + * NOTE: This is option ignored if auto-refresh is disabled because the + * config was specified via the G2CONFIGFILE in the + * {@link #CORE_SETTINGS} or if {@link #CORE_CONFIG_ID} has been specified to + * lock in a specific configuration. *

* This option can be specified in the following ways: *

    - *
  • Command Line: - * --refresh-config-seconds {integer}
  • + *
  • Command Line: --refresh-config-seconds {integer}
  • *
  • Environment: * SENZING_TOOLS_REFRESH_CONFIG_SECONDS="{integer}"
  • *
*/ - REFRESH_CONFIG_SECONDS("--refresh-config-seconds", - ENV_PREFIX + "REFRESH_CONFIG_SECONDS", null, - 1, DEFAULT_REFRESH_CONFIG_SECONDS_PARAM), + REFRESH_CONFIG_SECONDS("--refresh-config-seconds", ENV_PREFIX + "REFRESH_CONFIG_SECONDS", null, 1, + DEFAULT_REFRESH_CONFIG_SECONDS_PARAM), /** *

* Use this option to balance the message consumption and processing between * aggressively keeping the data mart closely in sync with the entity repository - * and less frequent batch processing to conserve system resources. The value - * to this option is one of the following: + * and less frequent batch processing to conserve system resources. The value to + * this option is one of the following: *

    - *
  • leisurely -- This setting allows for longer gaps between + *
  • leisurely -- This setting allows for longer gaps between * updating the data mart, favoring less frequent batch processing in order to * conserve system resources.
  • * - *
  • standard -- This is the default and is balance between + *
  • standard -- This is the default and is balance between * conserving system resources and keeping the data mart updated in a reasonably * timely manner.
  • * - *
  • aggressive -- This setting uses more system resources to - * aggressively consume and process incoming messages to keep the data mart closely - * in sync with the least time delay.
  • + *
  • aggressive -- This setting uses more system resources to + * aggressively consume and process incoming messages to keep the data mart + * closely in sync with the least time delay.
  • * *
* This option can be specified in the following ways: @@ -211,9 +197,9 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_REFRESH_CONFIG_SECONDS="{integer}" * */ - PROCESSING_RATE("--processing-rate", ENV_PREFIX + "PROCESSING_RATE", null, - 1, ProcessingRate.STANDARD.toString().toLowerCase()), - + PROCESSING_RATE("--processing-rate", ENV_PREFIX + "PROCESSING_RATE", null, 1, + ProcessingRate.STANDARD.toString().toLowerCase()), + /** *

* This option is used to specify the URL to an Amazon SQS queue to be used for @@ -232,12 +218,13 @@ public enum SzReplicatorOption implements CommandLineOption * This option is used to specify the URL to the RabbitMQ server for finding the - * RabbitMQ info queue. The single parameter to this option is an AMQP URL. If + * RabbitMQ info queue. The single parameter to this option is an AMQP URL. If * this option is specified then the SQS info queue parameter is not allowed. *

* This option can be specified in the following ways: *

    - *
  • Command Line: --rabbit-info-uri amqp://user:password@host:port/vhost
  • + *
  • Command Line: + * --rabbit-info-uri amqp://user:password@host:port/vhost
  • *
  • Environment: * SENZING_TOOLS_RABBITMQ_URI="amqp://user:password@host:port/vhost"
  • *
  • Environment: @@ -257,7 +244,8 @@ public enum SzReplicatorOption implements CommandLineOption *
  • Command Line: --rabbit-info-queue {queue-name}
  • - *
  • Environment: SENZING_TOOLS_RABBITMQ_INFO_QUEUE="{queue-name}"
  • + *
  • Environment: + * SENZING_TOOLS_RABBITMQ_INFO_QUEUE="{queue-name}"
  • *
*/ RABBITMQ_INFO_QUEUE("--rabbit-info-queue", ENV_PREFIX + "RABBITMQ_INFO_QUEUE", null, 1), @@ -291,20 +279,20 @@ public enum SzReplicatorOption implements CommandLineOption - *
  • {@value PostgreSqlUri#SUPPORTED_FORMAT_1}
  • - *
  • {@value PostgreSqlUri#SUPPORTED_FORMAT_2}
  • - *
  • {@value SQLiteUri#SUPPORTED_FORMAT_1}
  • - *
  • {@value SQLiteUri#SUPPORTED_FORMAT_2}
  • - *
  • {@value SQLiteUri#SUPPORTED_FORMAT_3}
  • + *
  • {@value PostgreSqlUri#SUPPORTED_FORMAT_1}
  • + *
  • {@value PostgreSqlUri#SUPPORTED_FORMAT_2}
  • + *
  • {@value SQLiteUri#SUPPORTED_FORMAT_1}
  • + *
  • {@value SQLiteUri#SUPPORTED_FORMAT_2}
  • + *
  • {@value SQLiteUri#SUPPORTED_FORMAT_3}
  • * - * NOTE: The PostgreSQL or SQLite URI can also be obtained from the + * NOTE: The PostgreSQL or SQLite URI can also be obtained from the * {@link #CORE_SETTINGS} by using a special URI in the following format: *
      - *
    • {@value SzCoreSettingsUri#SUPPORTED_FORMAT}
    • + *
    • {@value SzCoreSettingsUri#SUPPORTED_FORMAT}
    • *
    *

    * This option can be specified in the following ways: @@ -314,15 +302,12 @@ public enum SzReplicatorOption implements CommandLineOptionSENZING_TOOLS_DATA_MART_DATABASE_URI="{url}" * *

    - * The default value for this option if not specified is {@link - * SzReplicatorConstants#DEFAULT_CORE_SETTINGS_DATABASE_URI}. This - * is so it attempts to obtain the database URI from the {@linkplain - * #CORE_SETTINGS Senzing Core SDK settings}. + * The default value for this option if not specified is + * {@link SzReplicatorConstants#DEFAULT_CORE_SETTINGS_DATABASE_URI}. This is so + * it attempts to obtain the database URI from the {@linkplain #CORE_SETTINGS + * Senzing Core SDK settings}. */ - DATABASE_URI("--database-uri", - ENV_PREFIX + "DATA_MART_DATABASE_URI", - null, 1, - DEFAULT_CORE_SETTINGS_DATABASE_URI); + DATABASE_URI("--database-uri", ENV_PREFIX + "DATA_MART_DATABASE_URI", null, 1, DEFAULT_CORE_SETTINGS_DATABASE_URI); /** * Constructs with the specified parameters. @@ -353,7 +338,7 @@ public enum SzReplicatorOption implements CommandLineOptiontrue if this is a primary option, + * @param primary true if this is a primary option, * otherwise false. * @param cmdLineFlag The command-line flag. * @param envVariable The primary environment variable. 
@@ -473,24 +458,24 @@ public Set> getDependencies() { @Override public boolean isSensitive() { switch (this) { - case RABBITMQ_URI: - case DATABASE_URI: - case CORE_SETTINGS: - return true; - default: - return false; + case RABBITMQ_URI: + case DATABASE_URI: + case CORE_SETTINGS: + return true; + default: + return false; } } static { // force load the URI classes Class[] classes = { - ConnectionUri.class, - SQLiteUri.class, - PostgreSqlUri.class, - RabbitMqUri.class, - SQSUri.class, - SzCoreSettingsUri.class + ConnectionUri.class, + SQLiteUri.class, + PostgreSqlUri.class, + RabbitMqUri.class, + SQSUri.class, + SzCoreSettingsUri.class }; for (Class c : classes) { try { @@ -513,7 +498,7 @@ public boolean isSensitive() { lookupMap.put(option.getCommandLineFlag().toLowerCase(), option); } - SzReplicatorOption[] exclusiveOptions = {HELP, VERSION}; + SzReplicatorOption[] exclusiveOptions = { HELP, VERSION }; for (SzReplicatorOption option : SzReplicatorOption.values()) { for (SzReplicatorOption exclOption : exclusiveOptions) { if (option == exclOption) { @@ -572,8 +557,7 @@ public boolean isSensitive() { dependSet.addAll(requiredRabbit); baseDependSets.add(Collections.unmodifiableSet(dependSet)); - - SzReplicatorOption[] initOptions = {CORE_SETTINGS}; + SzReplicatorOption[] initOptions = { CORE_SETTINGS }; // make the primary options dependent on one set of info queue options for (SzReplicatorOption option : initOptions) { Set> dependencySets = dependencyMap.get(option); @@ -582,7 +566,8 @@ public boolean isSensitive() { } } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ExceptionInInitializerError(e); } finally { @@ -616,149 +601,135 @@ public Object process(CommandLineOption option, List params) { // down-cast SzReplicatorOption replicatorOption = (SzReplicatorOption) option; switch (replicatorOption) { - case HELP: - case VERSION: - return Boolean.TRUE; - - case 
IGNORE_ENVIRONMENT: - case DATABASE_INFO_QUEUE: - if (params.size() == 0) { + case HELP: + case VERSION: return Boolean.TRUE; - } - String boolText = params.get(0); - if ("false".equalsIgnoreCase(boolText)) { - return Boolean.FALSE; - } - if ("true".equalsIgnoreCase(boolText)) { - return Boolean.TRUE; - } - throw new IllegalArgumentException("The specified parameter for " + option.getCommandLineFlag() - + " must be true or false: " + params.get(0)); - case CORE_INSTANCE_NAME: - return params.get(0).trim(); + case IGNORE_ENVIRONMENT: + case DATABASE_INFO_QUEUE: + if (params.size() == 0) { + return Boolean.TRUE; + } + String boolText = params.get(0); + if ("false".equalsIgnoreCase(boolText)) { + return Boolean.FALSE; + } + if ("true".equalsIgnoreCase(boolText)) { + return Boolean.TRUE; + } + throw new IllegalArgumentException("The specified parameter for " + option.getCommandLineFlag() + + " must be true or false: " + params.get(0)); - case CORE_SETTINGS: { - String paramVal = params.get(0).trim(); - if (paramVal.length() == 0) { - throw new IllegalArgumentException( - "Missing parameter for core settings."); - } - if (paramVal.startsWith("{")) { - try { - return JsonUtilities.parseJsonObject(paramVal); + case CORE_INSTANCE_NAME: + return params.get(0).trim(); - } catch (Exception e) { - throw new IllegalArgumentException( - multilineFormat( - "Core settings is not valid JSON: ", - paramVal)); + case CORE_SETTINGS: { + String paramVal = params.get(0).trim(); + if (paramVal.length() == 0) { + throw new IllegalArgumentException("Missing parameter for core settings."); } - } else { - File initFile = new File(paramVal); - if (!initFile.exists()) { - throw new IllegalArgumentException( - "Specified JSON init file does not exist: " + initFile); + if (paramVal.startsWith("{")) { + try { + return JsonUtilities.parseJsonObject(paramVal); + + } catch (Exception e) { + throw new IllegalArgumentException( + multilineFormat("Core settings is not valid JSON: ", paramVal)); + } + } 
else { + File initFile = new File(paramVal); + if (!initFile.exists()) { + throw new IllegalArgumentException("Specified JSON init file does not exist: " + initFile); + } + String jsonText; + try { + jsonText = readTextFileAsString(initFile, "UTF-8"); + + } catch (IOException e) { + throw new RuntimeException( + multilineFormat("Failed to read JSON initialization file: " + initFile, "", + "Cause: " + e.getMessage())); + } + try { + return JsonUtilities.parseJsonObject(jsonText); + + } catch (Exception e) { + throw new IllegalArgumentException( + "The initialization file does not contain valid JSON: " + initFile); + } } - String jsonText; + } + case CORE_CONFIG_ID: try { - jsonText = readTextFileAsString(initFile, "UTF-8"); - - } catch (IOException e) { - throw new RuntimeException( - multilineFormat( - "Failed to read JSON initialization file: " - + initFile, - "", - "Cause: " + e.getMessage())); + return Long.parseLong(params.get(0)); + } catch (Exception e) { + throw new IllegalArgumentException("The configuration ID for " + option.getCommandLineFlag() + + " must be an integer: " + params.get(0)); } - try { - return JsonUtilities.parseJsonObject(jsonText); - } catch (Exception e) { - throw new IllegalArgumentException( - "The initialization file does not contain valid JSON: " - + initFile); + case CORE_LOG_LEVEL: { + String paramVal = params.get(0).trim().toLowerCase(); + + switch (paramVal) { + case "verbose": + case "1": + return true; + case "muted": + case "0": + return false; + default: + throw new IllegalArgumentException( + "The specified core log level is not recognized; " + paramVal); } } - } - case CORE_CONFIG_ID: - try { - return Long.parseLong(params.get(0)); - } catch (Exception e) { - throw new IllegalArgumentException( - "The configuration ID for " + option.getCommandLineFlag() - + " must be an integer: " + params.get(0)); + + case CORE_CONCURRENCY: { + int threadCount; + try { + threadCount = Integer.parseInt(params.get(0)); + } catch 
(IllegalArgumentException e) { + throw new IllegalArgumentException("Thread count must be an integer: " + params.get(0)); + } + if (threadCount <= 0) { + throw new IllegalArgumentException("Negative thread counts are not allowed: " + threadCount); + } + return threadCount; } - case CORE_LOG_LEVEL: { - String paramVal = params.get(0).trim().toLowerCase(); - - switch (paramVal) { - case "verbose": - case "1": - return true; - case "muted": - case "0": - return false; - default: + case REFRESH_CONFIG_SECONDS: + try { + return Long.parseLong(params.get(0)); + } catch (Exception e) { throw new IllegalArgumentException( - "The specified core log level is not recognized; " + paramVal); - } - } + "The specified refresh period for " + option.getCommandLineFlag() + + " must be an integer: " + params.get(0)); + } - case CORE_CONCURRENCY: { - int threadCount; - try { - threadCount = Integer.parseInt(params.get(0)); - } catch (IllegalArgumentException e) { - throw new IllegalArgumentException( - "Thread count must be an integer: " + params.get(0)); - } - if (threadCount <= 0) { - throw new IllegalArgumentException( - "Negative thread counts are not allowed: " + threadCount); - } - return threadCount; - } - - case REFRESH_CONFIG_SECONDS: - try { - return Long.parseLong(params.get(0)); - } catch (Exception e) { - throw new IllegalArgumentException( - "The specified refresh period for " - + option.getCommandLineFlag() + " must be an integer: " - + params.get(0)); - } + case PROCESSING_RATE: + return parseProcessingRate(params.get(0)); - case PROCESSING_RATE: - return parseProcessingRate(params.get(0)); + case SQS_INFO_URI: + return SQSUri.parse(params.get(0)); - case SQS_INFO_URI: - return SQSUri.parse(params.get(0)); + case RABBITMQ_URI: + return RabbitMqUri.parse(params.get(0)); - case RABBITMQ_URI: - return RabbitMqUri.parse(params.get(0)); + case RABBITMQ_INFO_QUEUE: + return params.get(0); - case RABBITMQ_INFO_QUEUE: - return params.get(0); + case DATABASE_URI: + return 
parseDatabaseUri(params.get(0)); - case DATABASE_URI: - return parseDatabaseUri(params.get(0)); - - default: - throw new IllegalArgumentException( - "Unhandled command line option: " - + option.getCommandLineFlag() + " / " + option); + default: + throw new IllegalArgumentException( + "Unhandled command line option: " + option.getCommandLineFlag() + " / " + option); } } } /** - * Parses the specified parameter value as a database - * {@link ConnectionUri}. + * Parses the specified parameter value as a database {@link ConnectionUri}. * * @param paramValue The parameter value to parse. * @@ -777,14 +748,12 @@ public static ProcessingRate parseProcessingRate(String paramValue) { prefix = ", "; } throw new IllegalArgumentException( - "Unrecognized processing rate value (" + paramValue - + "). Should be one of: " + sb.toString()); + "Unrecognized processing rate value (" + paramValue + "). Should be one of: " + sb.toString()); } } /** - * Parses the specified parameter value as a database - * {@link ConnectionUri}. + * Parses the specified parameter value as a database {@link ConnectionUri}. * * @param paramValue The parameter value to parse. 
* @@ -792,13 +761,11 @@ public static ProcessingRate parseProcessingRate(String paramValue) { */ public static ConnectionUri parseDatabaseUri(String paramValue) { Objects.requireNonNull(paramValue, "Parameter value cannot be null"); - Set> allowed - = Set.of(PostgreSqlUri.class, SQLiteUri.class); - + Set> allowed = Set.of(PostgreSqlUri.class, SQLiteUri.class); + ConnectionUri uri = ConnectionUri.parse(paramValue); if (!allowed.contains(uri.getClass())) { - throw new IllegalArgumentException( - "Unrecognized database connection URI: " + paramValue); + throw new IllegalArgumentException("Unrecognized database connection URI: " + paramValue); } return uri; } diff --git a/src/main/java/com/senzing/datamart/handlers/RefreshEntityHandler.java b/src/main/java/com/senzing/datamart/handlers/RefreshEntityHandler.java index 6b3c068..6f7c650 100644 --- a/src/main/java/com/senzing/datamart/handlers/RefreshEntityHandler.java +++ b/src/main/java/com/senzing/datamart/handlers/RefreshEntityHandler.java @@ -1,6 +1,7 @@ package com.senzing.datamart.handlers; import com.senzing.datamart.SzReplicationProvider; +import com.senzing.datamart.SzReplicationProvider.TaskAction; import com.senzing.datamart.model.*; import com.senzing.listener.service.exception.ServiceExecutionException; import com.senzing.listener.service.locking.ResourceKey; @@ -190,7 +191,8 @@ protected void handleTask(Map parameters, int multiplicity, Sche conn.rollback(); } catch (Exception e2) { logError(e2, "FAILED TO ROLLBACK: "); - e2.printStackTrace(); + System.err.println(e2.getMessage()); + System.err.println(formatStackTrace(e2.getStackTrace())); } throw new ServiceExecutionException(e); @@ -821,10 +823,8 @@ private static void followUpOnRelatedEntity(Scheduler followUpScheduler, Set parameters, int multiplicity, Sche } } catch (Exception e2) { logError(e2, "**** FAILED TO ROLLBACK"); - e2.printStackTrace(); + System.err.println(e2.getMessage()); + System.err.println(formatStackTrace(e2.getStackTrace())); } throw 
new ServiceExecutionException(e); @@ -434,7 +437,7 @@ protected int updateReportDetails(Connection conn, SzReportKey reportKey, String String key = String.valueOf(entityId); int[] deltaArr = deltaSumMap.get(key); if (deltaArr == null) { - deltaArr = new int[] {0}; + deltaArr = new int[] { 0 }; deltaSumMap.put(key, deltaArr); } deltaArr[0] += entityDelta; @@ -445,7 +448,7 @@ protected int updateReportDetails(Connection conn, SzReportKey reportKey, String String key = entityId + ":" + relatedId; int[] deltaArr = deltaSumMap.get(key); if (deltaArr == null) { - deltaArr = new int[] {0}; + deltaArr = new int[] { 0 }; deltaSumMap.put(key, deltaArr); } deltaArr[0] += relationDelta; diff --git a/src/main/java/com/senzing/datamart/model/SzRelationship.java b/src/main/java/com/senzing/datamart/model/SzRelationship.java index 9ba82fd..8327c53 100644 --- a/src/main/java/com/senzing/datamart/model/SzRelationship.java +++ b/src/main/java/com/senzing/datamart/model/SzRelationship.java @@ -9,6 +9,7 @@ import java.util.*; import static com.senzing.util.JsonUtilities.*; +import static com.senzing.util.LoggingUtilities.formatStackTrace; /** * Describes a relationship between two entities as it is stored in the data @@ -428,7 +429,8 @@ public static void main(String[] args) { System.out.println(relationship1.equals(relationship2)); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); } } } diff --git a/src/main/java/com/senzing/datamart/reports/EntityRelationsReportsService.java b/src/main/java/com/senzing/datamart/reports/EntityRelationsReportsService.java index 5b97ea9..a4e3f4f 100644 --- a/src/main/java/com/senzing/datamart/reports/EntityRelationsReportsService.java +++ b/src/main/java/com/senzing/datamart/reports/EntityRelationsReportsService.java @@ -13,6 +13,7 @@ import com.senzing.util.Timers; import static com.senzing.sql.SQLUtilities.close; +import static 
com.senzing.util.LoggingUtilities.formatStackTrace; import java.sql.Connection; import java.sql.SQLException; @@ -64,11 +65,13 @@ default SzEntityRelationsBreakdown getEntityRelationsBreakdown() throws ReportsS return EntityRelationsReports.getEntityRelationsBreakdown(conn, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -104,11 +107,13 @@ default SzEntityRelationsCount getEntityRelationsCount(@Param("relationCount") i return EntityRelationsReports.getEntityRelationsCount(conn, relationCount, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -155,11 +160,13 @@ default SzEntitiesPage getEntityRelationsEntities(@Param("relationCount") int re return EntityRelationsReports.getEntityIdsForRelationCount(conn, relationCount, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { diff --git 
a/src/main/java/com/senzing/datamart/reports/EntitySizeReportsService.java b/src/main/java/com/senzing/datamart/reports/EntitySizeReportsService.java index f5ae274..75e0c38 100644 --- a/src/main/java/com/senzing/datamart/reports/EntitySizeReportsService.java +++ b/src/main/java/com/senzing/datamart/reports/EntitySizeReportsService.java @@ -13,6 +13,7 @@ import com.senzing.util.Timers; import static com.senzing.sql.SQLUtilities.close; +import static com.senzing.util.LoggingUtilities.formatStackTrace; import java.sql.Connection; import java.sql.SQLException; @@ -63,11 +64,13 @@ default SzEntitySizeBreakdown getEntitySizeBreakdown() throws ReportsServiceExce return EntitySizeReports.getEntitySizeBreakdown(conn, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -102,11 +105,13 @@ default SzEntitySizeCount getEntitySizeCount(@Param("entitySize") int entitySize return EntitySizeReports.getEntitySizeCount(conn, entitySize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -154,11 +159,13 @@ default SzEntitiesPage getEntitySizeEntities(@Param("entitySize") int entitySize return EntitySizeReports.getEntityIdsForEntitySize(conn, entitySize, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + 
System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { diff --git a/src/main/java/com/senzing/datamart/reports/LoadedStatsReportsService.java b/src/main/java/com/senzing/datamart/reports/LoadedStatsReportsService.java index 2860904..67ab3fb 100644 --- a/src/main/java/com/senzing/datamart/reports/LoadedStatsReportsService.java +++ b/src/main/java/com/senzing/datamart/reports/LoadedStatsReportsService.java @@ -13,6 +13,7 @@ import com.senzing.util.Timers; import static com.senzing.sql.SQLUtilities.close; +import static com.senzing.util.LoggingUtilities.formatStackTrace; import java.sql.Connection; import java.sql.SQLException; @@ -71,11 +72,13 @@ default SzLoadedStats getLoadedStatistics(@Param("onlyLoadedSources") @Default(" return LoadedStatsReports.getLoadedStatistics(conn, dataSources, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -111,11 +114,13 @@ default SzSourceLoadedStats getSourceLoadedStatistics(@Param("dataSourceCode") S return LoadedStatsReports.getSourceLoadedStatistics(conn, dataSource, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + 
System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -163,11 +168,13 @@ default SzEntitiesPage getEntityIdsForDataSource(@Param("dataSourceCode") String return LoadedStatsReports.getEntityIdsForDataSource(conn, dataSource, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { diff --git a/src/main/java/com/senzing/datamart/reports/SummaryStatsReportsService.java b/src/main/java/com/senzing/datamart/reports/SummaryStatsReportsService.java index 0ae0248..0ec1afd 100644 --- a/src/main/java/com/senzing/datamart/reports/SummaryStatsReportsService.java +++ b/src/main/java/com/senzing/datamart/reports/SummaryStatsReportsService.java @@ -17,6 +17,7 @@ import com.senzing.util.Timers; import static com.senzing.sql.SQLUtilities.close; +import static com.senzing.util.LoggingUtilities.formatStackTrace; import java.sql.Connection; import java.sql.SQLException; @@ -202,11 +203,13 @@ default SzSummaryStats getSummaryStats(@Param("matchKey") @Nullable String match return SummaryStatsReports.getSummaryStatistics(conn, matchKey, principle, dataSources, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -255,11 +258,13 @@ default 
SzSourceSummary getSourceSummary(@Param("dataSourceCode") String dataSou return SummaryStatsReports.getSourceSummary(conn, dataSource, matchKey, principle, dataSources, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -311,11 +316,13 @@ default SzCrossSourceSummary getCrossSourceSummary(@Param("dataSourceCode") Stri timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -367,11 +374,13 @@ default SzCrossSourceMatchCounts getCrossSourceMatchSummary(@Param("dataSourceCo timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -423,11 +432,13 @@ default SzCrossSourceRelationCounts getCrossSourceAmbiguousMatchSummary(@Param(" principle, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + 
System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -481,11 +492,13 @@ default SzCrossSourceRelationCounts getCrossSourcePossibleMatchSummary(@Param("d principle, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -539,11 +552,13 @@ default SzCrossSourceRelationCounts getCrossSourcePossibleRelationSummary(@Param principle, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -597,11 +612,13 @@ default SzCrossSourceRelationCounts getCrossSourceDisclosedRelationSummary(@Para principle, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -659,11 +676,13 @@ default SzEntitiesPage getSummaryMatchEntityIds(@Param("dataSourceCode") String return SummaryStatsReports.getSummaryMatchEntityIds(conn, dataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, 
timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -722,11 +741,13 @@ default SzEntitiesPage getSummaryAmbiguousMatchEntityIds(@Param("dataSourceCode" return SummaryStatsReports.getSummaryAmbiguousMatchEntityIds(conn, dataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -786,11 +807,13 @@ default SzEntitiesPage getSummaryPossibleMatchEntityIds(@Param("dataSourceCode") return SummaryStatsReports.getSummaryPossibleMatchEntityIds(conn, dataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -850,11 +873,13 @@ default SzEntitiesPage getSummaryPossibleRelationEntityIds(@Param("dataSourceCod return SummaryStatsReports.getSummaryPossibleRelationEntityIds(conn, dataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, 
timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -914,11 +939,13 @@ default SzEntitiesPage getSummaryDisclosedRelatedEntityIds(@Param("dataSourceCod return SummaryStatsReports.getSummaryDisclosedRelatedEntityIds(conn, dataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -982,11 +1009,13 @@ default SzEntitiesPage getSummaryMatchEntityIds(@Param("dataSourceCode") String return SummaryStatsReports.getSummaryMatchEntityIds(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1050,11 +1079,13 @@ default SzEntitiesPage getSummaryAmbiguousMatchEntityIds(@Param("dataSourceCode" return SummaryStatsReports.getSummaryAmbiguousMatchEntityIds(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, 
pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1118,11 +1149,13 @@ default SzEntitiesPage getSummaryPossibleMatchEntityIds(@Param("dataSourceCode") return SummaryStatsReports.getSummaryPossibleMatchEntityIds(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1186,11 +1219,13 @@ default SzEntitiesPage getSummaryPossibleRelationEntityIds(@Param("dataSourceCod return SummaryStatsReports.getSummaryPossibleRelationEntityIds(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1254,11 +1289,13 @@ default SzEntitiesPage getSummaryDisclosedRelationEntityIds(@Param("dataSourceCo return SummaryStatsReports.getSummaryDisclosedRelationEntityIds(conn, dataSource, vsDataSource, 
matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1322,11 +1359,13 @@ default SzRelationsPage getSummaryAmbiguousMatchRelations(@Param("dataSourceCode return SummaryStatsReports.getSummaryAmbiguousMatchRelations(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1390,11 +1429,13 @@ default SzRelationsPage getSummaryPossibleMatchRelations(@Param("dataSourceCode" return SummaryStatsReports.getSummaryPossibleMatchRelations(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1458,11 +1499,13 @@ default SzRelationsPage getSummaryPossibleRelations(@Param("dataSourceCode") Str return 
SummaryStatsReports.getSummaryPossibleRelations(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { @@ -1526,11 +1569,13 @@ default SzRelationsPage getSummaryDisclosedRelations(@Param("dataSourceCode") St return SummaryStatsReports.getSummaryDisclosedRelations(conn, dataSource, vsDataSource, matchKey, principle, entityIdBound, boundType, pageSize, sampleSize, timers); } catch (SQLException e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); throw new ReportsServiceException(e); } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); if (e instanceof RuntimeException) { throw ((RuntimeException) e); } else { diff --git a/src/main/java/com/senzing/datamart/schema/SchemaBuilder.java b/src/main/java/com/senzing/datamart/schema/SchemaBuilder.java index 314bb48..c20549a 100644 --- a/src/main/java/com/senzing/datamart/schema/SchemaBuilder.java +++ b/src/main/java/com/senzing/datamart/schema/SchemaBuilder.java @@ -21,10 +21,10 @@ protected SchemaBuilder() { } /** - * Ensures the schema exists and optionally drops the schema before - * recreating it. + * Ensures the schema exists and optionally drops the schema before recreating + * it. * - * @param conn The JDBC {@link Connection} to use for creating the schema. + * @param conn The JDBC {@link Connection} to use for creating the schema. 
* * @param recreate true if the schema should be dropped and * recreated, or false if any existing schema @@ -33,20 +33,17 @@ protected SchemaBuilder() { * @throws SQLException If a JDBC failure occurs. * */ - public abstract void ensureSchema(Connection conn, boolean recreate) - throws SQLException; + public abstract void ensureSchema(Connection conn, boolean recreate) throws SQLException; /** * Utility method to execute a {@link List} of SQL statements. * - * @param conn The {@link Connection} with which to execute the statements. + * @param conn The {@link Connection} with which to execute the statements. * @param sqlList The {@link List} of SQL statements to execute. * * @throws SQLException If a JDBC failure occurs. */ - protected void executeStatements(Connection conn, List sqlList) - throws SQLException - { + protected void executeStatements(Connection conn, List sqlList) throws SQLException { Statement stmt = null; ResultSet rs = null; try { diff --git a/src/main/java/com/senzing/listener/communication/AbstractMessageConsumer.java b/src/main/java/com/senzing/listener/communication/AbstractMessageConsumer.java index 946bd5e..93a550a 100644 --- a/src/main/java/com/senzing/listener/communication/AbstractMessageConsumer.java +++ b/src/main/java/com/senzing/listener/communication/AbstractMessageConsumer.java @@ -6,6 +6,7 @@ import com.senzing.listener.service.locking.LockingService; import com.senzing.listener.service.locking.ProcessScopeLockingService; import com.senzing.util.AsyncWorkerPool; +import com.senzing.util.AsyncWorkerPool.AsyncResult; import com.senzing.util.JsonUtilities; import com.senzing.util.Timers; @@ -462,12 +463,12 @@ protected synchronized void setState(State state) { } /** - * Once this instance has transferred to the {@link State#DESTROYING} - * state, this method can be called to wait until we have transitioned - * to the {@link State#DESTROYED} state. 
+ * Once this instance has transferred to the {@link State#DESTROYING} state, + * this method can be called to wait until we have transitioned to the + * {@link State#DESTROYED} state. * - * @throws IllegalStateException If this method is called when NOT - * in the {@link State#DESTROYED} or + * @throws IllegalStateException If this method is called when NOT in the + * {@link State#DESTROYED} or * {@link State#DESTROYING} state. */ protected synchronized void waitUntilDestroyed() { @@ -479,8 +480,7 @@ protected synchronized void waitUntilDestroyed() { // check if NOT destroying if (this.getState() != State.DESTROYING) { throw new IllegalStateException( - "Cannot call waitUntilDestroyed() if NOT currently destroying: " - + this.getState()); + "Cannot call waitUntilDestroyed() if NOT currently destroying: " + this.getState()); } // wait until notified @@ -1214,7 +1214,8 @@ protected void processMessages(MessageProcessor processor) { } } catch (Exception e) { - e.printStackTrace(); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); } } diff --git a/src/main/java/com/senzing/listener/communication/sql/SQLConsumer.java b/src/main/java/com/senzing/listener/communication/sql/SQLConsumer.java index b637950..032d8c9 100644 --- a/src/main/java/com/senzing/listener/communication/sql/SQLConsumer.java +++ b/src/main/java/com/senzing/listener/communication/sql/SQLConsumer.java @@ -36,1060 +36,1018 @@ import static com.senzing.listener.communication.MessageConsumer.State.*; /** - * A consumer for a SQL-based message queue using a database table to hold - * the pending messages. + * A consumer for a SQL-based message queue using a database table to hold the + * pending messages. */ public class SQLConsumer extends AbstractMessageConsumer { - /** - * Provides an interface for interacting with the message queue used - * by the associated {@link SQLConsumer}. 
- */ - public interface MessageQueue { /** - * Checks if the message queue is empty. - * - * @return true if the queue is empty, - * otherwise false. - * - * @throws SQLException If a database failure occurs. + * Provides an interface for interacting with the message queue used by the + * associated {@link SQLConsumer}. */ - boolean isEmpty() throws SQLException; + public interface MessageQueue { + /** + * Checks if the message queue is empty. + * + * @return true if the queue is empty, otherwise + * false. + * + * @throws SQLException If a database failure occurs. + */ + boolean isEmpty() throws SQLException; + + /** + * Gets the number of messages currently in the message queue. The returned + * value will include leased messages. + * + * @return The number of messages in the message queue (including leased + * messages). + * + * @throws SQLException If a database failure occurs. + */ + int getMessageCount() throws SQLException; + + /** + * Enqueues a message on this {@link MessageQueue} so the associated + * {@link SQLConsumer} can consume it. + * + * @param message The message to enqueue. + * + * @throws SQLException If a SQL failure occurs. + */ + void enqueueMessage(String message) throws SQLException; + + /** + * Gets the associated {@link SQLConsumer}. + * + * @return The associated {@link SQLConsumer}. + */ + SQLConsumer getSQLConsumer(); + } /** - * Gets the number of messages currently in the message queue. The - * returned value will include leased messages. - * - * @return The number of messages in the message queue (including leased - * messages). + * Provides a {@link MessageQueue} implementation that is backed by the + * {@link SQLClient} for the associated {@link SQLConsumer}. * - * @throws SQLException If a database failure occurs. */ - int getMessageCount() throws SQLException; + protected class SimpleMessageQueue implements MessageQueue { + /** + * The {@link SQLClient} backing this instance. 
+ */ + private SQLClient client = null; + + /** + * Constructs with the {@link SQLClient} to use. + * + * @param client The {@link SQLClient} to use. + */ + protected SimpleMessageQueue(SQLClient client) { + this.client = client; + } + + /** + * {@inheritDoc} + *

    + * Implemented to call {@link SQLClient#isQueueEmpty(Connection)} on the backing + * {@link SQLClient}. + *

    + */ + public boolean isEmpty() throws SQLException { + Connection conn = null; + try { + conn = SQLConsumer.this.getConnection(); + + return this.client.isQueueEmpty(conn); + + } finally { + conn = close(conn); + } + } + + /** + * {@inheritDoc} + *

    + * Implemented to call {@link SQLClient#getMessageCount(Connection)} on the + * backing {@link SQLClient}. + *

    + */ + public int getMessageCount() throws SQLException { + Connection conn = null; + try { + conn = SQLConsumer.this.getConnection(); + + return this.client.getMessageCount(conn); + + } finally { + conn = close(conn); + } + } + + /** + * {@inheritDoc} + *

    + * Implemented to call {@link SQLClient#insertMessage(Connection,String)} on the + * backing {@link SQLClient}. + *

    + */ + public void enqueueMessage(String message) throws SQLException { + Connection conn = null; + try { + conn = SQLConsumer.this.getConnection(); + + this.client.insertMessage(conn, message); + + conn.commit(); + + } finally { + conn = close(conn); + } + } + + /** + * {@inheritDoc} + */ + public SQLConsumer getSQLConsumer() { + return SQLConsumer.this; + } + } /** - * Enqueues a message on this {@link MessageQueue} so the associated - * {@link SQLConsumer} can consume it. - * - * @param message The message to enqueue. - * - * @throws SQLException If a SQL failure occurs. + * {@link Registry} used to register the {@link MessageQueue} instances + * associated with each {@link SQLConsumer}. In order to register the + * {@link MessageQueue} the {@link #QUEUE_REGISTRY_NAME_KEY} initialization + * parameter must be provided for the {@link SQLConsumer}. */ - void enqueueMessage(String message) throws SQLException; + public static final Registry MESSAGE_QUEUE_REGISTRY = new Registry<>(false); /** - * Gets the associated {@link SQLConsumer}. - * - * @return The associated {@link SQLConsumer}. + * The initialization parameter key used to obtain the name for binding the + * {@link MessageQueue} instance for interacting with the backing message queue + * of the {@link SQLConsumer} in the {@link #MESSAGE_QUEUE_REGISTRY}. There is + * no default value for this initialization parameter, if it is not specified + * then the {@link MessageQueue} is not registered in the + * {@link #MESSAGE_QUEUE_REGISTRY}. The {@link MessageQueue} is unbound when the + * {@link SQLConsumer} is destroyed. */ - SQLConsumer getSQLConsumer(); - } - - /** - * Provides a {@link MessageQueue} implementation that is backed - * by the {@link SQLClient} for the associated {@link SQLConsumer}. - * - */ - protected class SimpleMessageQueue implements MessageQueue { + public static final String QUEUE_REGISTRY_NAME_KEY = "queueRegistryName"; + /** - * The {@link SQLClient} backing this instance. 
+ * The initialization parameter key for checking if the persistent store of + * messages should be dropped / deleted and recreated during initialization. + * Values should be true or false. */ - private SQLClient client = null; + public static final String CLEAN_DATABASE_KEY = "cleanDatabase"; /** - * Constructs with the {@link SQLClient} to use. - * - * @param client The {@link SQLClient} to use. + * The initialization parameter key for obtaining the {@link ConnectionProvider} + * to use for connecting to the database from the + * {@link ConnectionProvider#REGISTRY}. */ - protected SimpleMessageQueue(SQLClient client) { - this.client = client; - } + public static final String CONNECTION_PROVIDER_KEY = "connectionProvider"; /** - * {@inheritDoc} - *

    - * Implemented to call {@link SQLClient#isQueueEmpty(Connection)} - * on the backing {@link SQLClient}. - *

    + * The initialization parameter to configure the maximum number of times to + * retry a failed attempt to select messages from the database before aborting + * consumption. */ - public boolean isEmpty() throws SQLException { - Connection conn = null; - try { - conn = SQLConsumer.this.getConnection(); + public static final String MAXIMUM_RETRIES_KEY = "maximumRetries"; - return this.client.isQueueEmpty(conn); + /** + * The initialization parameter to configure the number of milliseconds to wait + * to retry when a failure occurs. This is only matters if the configured + * {@linkplain #MAXIMUM_RETRIES_KEY failure threshold} is greater than one (1). + */ + public static final String RETRY_WAIT_TIME_KEY = "retryWaitTime"; - } finally { - conn = close(conn); - } - } + /** + * The initialization parameter to configure the number of seconds + * messages are leased on the database table before they become available to + * another consumer instance. If not configured then {@link #DEFAULT_LEASE_TIME} + * is used. Specifying this initialization parameter allows the clients to + * override. + */ + public static final String LEASE_TIME_KEY = "leaseTime"; /** - * {@inheritDoc} - *

    - * Implemented to call {@link SQLClient#getMessageCount(Connection)} - * on the backing {@link SQLClient}. - *

    + * The initialization parameter to configure the maximum number of messages to + * be leased from the database table at one time. If not configured then + * {@link #DEFAULT_MAXIMUM_LEASE_COUNT} is used. Specifying this initialization + * parameter allows clients to override. + */ + public static final String MAXIMUM_LEASE_COUNT_KEY = "maximumLeaseCount"; + + /** + * The initialization parameter to configure the maximum number of + * seconds to sleep when the database queue is found to be empty in order + * to avoid a busy loop of constant queries. The actual amount of time used for + * sleep will progressively increase as the message queue continues to be empty + * until it equals the configured maximum number of seconds. If not configured + * then {@link #DEFAULT_MAXIMUM_SLEEP_TIME} is used. Specifying this + * initialization parameter allows clients to override. + */ + public static final String MAXIMUM_SLEEP_TIME_KEY = "maximumSleepTime"; + + /** + * The default number of times to retry failed SQS requests before aborting + * consumption. The default value is {@value}. A different value can be set via + * the {#link #MAXIMUM_RETRIES_KEY} initialization parameter. + */ + public static final int DEFAULT_MAXIMUM_RETRIES = 0; + + /** + * The default number of milliseconds to wait before retrying the SQS request if + * the previous request failed. The default value is {@value}. A different value + * can be set via the {@link #RETRY_WAIT_TIME_KEY} initialization parameter. + */ + public static final long DEFAULT_RETRY_WAIT_TIME = 1000L; + + /** + * The default number of seconds to lease a message in the database table, + * preventing other consumers from obtaining it. The default value is + * {@value}. A different value can be set via the {@link #LEASE_TIME_KEY} + * initialization parameter. + */ + public static final int DEFAULT_LEASE_TIME = 1800; + + /** + * The default maximum number of messages to be leased from the database table + * at one time. 
The default value is {@value}. A different value can be set via + * the {@link #MAXIMUM_LEASE_COUNT_KEY} initialization parameter. + */ + public static final int DEFAULT_MAXIMUM_LEASE_COUNT = 100; + + /** + * The default maximum number of second to sleep when an empty queue is + * encountered in order to avoid a busy loop of querying the database. The + * actual amount of time used for sleep will progressively increase as the + * message queue continues to be empty until it equals the configured maximum + * number of seconds. The default value is {@value}. A different value can be + * set via the {@link #MAXIMUM_SLEEP_TIME_KEY} initialization parameter. + */ + public static final int DEFAULT_MAXIMUM_SLEEP_TIME = 10; + + /** + * Defined constant for one second in milliseconds. + */ + private static final long ONE_SECOND = 1000L; + + /** + * The {@link ConnectionProvider} to use for obtaining {@link Connection} + * instances. + */ + private ConnectionProvider connectionProvider; + + /** + * The {@link MessageQueue} for this instance. + */ + private MessageQueue messageQueue; + + /** + * The name for binding the {@link #messageQueue} in the + * {@link #MESSAGE_QUEUE_REGISTRY}. + */ + private String queueRegistryName = null; + + /** + * The {@link AccessToken} for unbinding the {@link #messageQueue} from the + * {@link #MESSAGE_QUEUE_REGISTRY}. + */ + private AccessToken registryToken = null; + + /** + * The {@link SQLClient} to use for interacting with the database. */ - public int getMessageCount() throws SQLException { - Connection conn = null; - try { - conn = SQLConsumer.this.getConnection(); + private SQLClient sqlClient; - return this.client.getMessageCount(conn); + /** + * The consumption thread for this instance. + */ + private Thread consumptionThread = null; + + /** + * The maximum number of times to retry failed SQS requests before aborting + * consumption. 
+ */ + private int maximumRetries = DEFAULT_MAXIMUM_RETRIES; + + /** + * The number of milliseconds to wait before retrying the SQS request if the + * previous request failed. + */ + private long retryWaitTime = DEFAULT_RETRY_WAIT_TIME; + + /** + * The configured number of seconds to lease messages from the database queue + * before they become available to other consumers. + */ + private int leaseTime = DEFAULT_LEASE_TIME; + + /** + * The configured maximum number of messages to lease at one time from the + * database queue. + */ + private int maximumLeaseCount = DEFAULT_MAXIMUM_LEASE_COUNT; + + /** + * The configured maximum number of second to sleep when an empty queue is + * encountered in order to avoid a busy loop of querying the database. The + * actual amount of time used for sleep will progressively increase as the + * message queue continues to be empty until it equals the configured maximum + * number of seconds. + */ + private int maximumSleepTime = DEFAULT_MAXIMUM_SLEEP_TIME; - } finally { - conn = close(conn); - } + /** + * Private default constructor. + */ + public SQLConsumer() { + // do nothing } /** - * {@inheritDoc} + * Initializes the object. It sets the object up based on configuration passed + * in. *

    - * Implemented to call {@link SQLClient#insertMessage(Connection,String)} - * on the backing {@link SQLClient}. - *

    + * The configuration is in JSON format: + * + *
    +     * {
    +     *   "connectionProvider": "<provider-registry-name>",
    +     *   "cleanDatabase": "<true|false>",
    +     *   "maximumRetries": "<retry-count>"
    +     *   "retryWaitTime": "<pause-milliseconds>",
    +     *   "leaseTime": "<lease-time-seconds>",
    +     *   "maximumLeaseCount": "<message-count>",
    +     *   "maximumSleepTime": "<sleep-time-seconds>"
    +     * }
    +     * 
    + * + * @param config Configuration string containing the needed information to + * connect to connect to the backing database to lease messages + * and consume them. + * + * @throws MessageConsumerSetupException If an initialization failure occurs. */ - public void enqueueMessage(String message) throws SQLException { - Connection conn = null; - try { - conn = SQLConsumer.this.getConnection(); + @Override + protected void doInit(JsonObject config) throws MessageConsumerSetupException { + try { + // check if we are cleaning the database + Boolean clean = getConfigBoolean(config, CLEAN_DATABASE_KEY, FALSE); + + // get the connection provider name + String providerKey = getConfigString(config, CONNECTION_PROVIDER_KEY, true); + + try { + this.connectionProvider = ConnectionProvider.REGISTRY.lookup(providerKey); + } catch (NameNotFoundException e) { + throw new MessageConsumerSetupException( + "No ConnectionProvider was registered to the name specified by the " + "\"" + + CONNECTION_PROVIDER_KEY + "\" initialization parameter: " + providerKey); + } + + // get the failure threshold + this.maximumRetries = getConfigInteger(config, MAXIMUM_RETRIES_KEY, 0, DEFAULT_MAXIMUM_RETRIES); + + // get the retry wait time + this.retryWaitTime = getConfigLong(config, RETRY_WAIT_TIME_KEY, 0L, DEFAULT_RETRY_WAIT_TIME); + + // get the lease time + this.leaseTime = getConfigInteger(config, LEASE_TIME_KEY, 1, DEFAULT_LEASE_TIME); + + // get the maximum lease count + this.maximumLeaseCount = getConfigInteger(config, MAXIMUM_LEASE_COUNT_KEY, 1, DEFAULT_MAXIMUM_LEASE_COUNT); + + // get the maximum sleep time + this.maximumSleepTime = getConfigInteger(config, MAXIMUM_SLEEP_TIME_KEY, 1, DEFAULT_MAXIMUM_SLEEP_TIME); - this.client.insertMessage(conn, message); + // initialize the SQLClient + this.sqlClient = this.initSQLClient(); - conn.commit(); + // initialize the message queue interface + this.messageQueue = this.initMessageQueue(); - } finally { - conn = close(conn); - } + // ensure the 
schema exists + this.ensureSchema(clean); + + // optionally register the MessageQueue interface + this.queueRegistryName = getConfigString(config, QUEUE_REGISTRY_NAME_KEY, false); + + if (this.queueRegistryName != null) { + this.registryToken = MESSAGE_QUEUE_REGISTRY.bind(this.queueRegistryName, this.messageQueue); + } + + } catch (Exception e) { + throw new MessageConsumerSetupException(e); + } } /** - * {@inheritDoc} + * Gets a JDBC {@link Connection} to use. Typically these are obtained from a + * backing pool so repeated calls to this function without closing the + * previously obtained {@link Connection} instances could exhaust the pool. This + * may block until a {@link Connection} is available. + * + * @return The {@link Connection} that was obtained. + * + * @throws SQLException If a JDBC failure occurs. */ - public SQLConsumer getSQLConsumer() { - return SQLConsumer.this; + protected Connection getConnection() throws SQLException { + return this.connectionProvider.getConnection(); } - } - - /** - * {@link Registry} used to register the {@link MessageQueue} instances - * associated with each {@link SQLConsumer}. In order to register the - * {@link MessageQueue} the {@link #QUEUE_REGISTRY_NAME_KEY} initialization - * parameter must be provided for the {@link SQLConsumer}. - */ - public static final Registry MESSAGE_QUEUE_REGISTRY = new Registry<>(false); - - /** - * The initialization parameter key used to obtain the name for binding - * the {@link MessageQueue} instance for interacting with the backing - * message queue of the {@link SQLConsumer} in the {@link - * #MESSAGE_QUEUE_REGISTRY}. There is no default value for this - * initialization parameter, if it is not specified then the {@link - * MessageQueue} is not registered in the {@link #MESSAGE_QUEUE_REGISTRY}. - * The {@link MessageQueue} is unbound when the {@link SQLConsumer} is - * destroyed. 
- */ - public static final String QUEUE_REGISTRY_NAME_KEY = "queueRegistryName"; - - /** - * The initialization parameter key for checking if the persistent store - * of messages should be dropped / deleted and recreated during - * initialization. Values should be true or false. - */ - public static final String CLEAN_DATABASE_KEY = "cleanDatabase"; - - /** - * The initialization parameter key for obtaining the {@link - * ConnectionProvider} to use for connecting to the database from the - * {@link ConnectionProvider#REGISTRY}. - */ - public static final String CONNECTION_PROVIDER_KEY = "connectionProvider"; - - /** - * The initialization parameter to configure the maximum number of times to - * retry a failed attempt to select messages from the database before - * aborting consumption. - */ - public static final String MAXIMUM_RETRIES_KEY = "maximumRetries"; - - /** - * The initialization parameter to configure the number of milliseconds to - * wait to retry when a failure occurs. This is only matters if the - * configured {@linkplain #MAXIMUM_RETRIES_KEY failure threshold} is - * greater than one (1). - */ - public static final String RETRY_WAIT_TIME_KEY = "retryWaitTime"; - - /** - * The initialization parameter to configure the number of seconds - * messages are leased on the database table before they become available - * to another consumer instance. If not configured then {@link - * #DEFAULT_LEASE_TIME} is used. Specifying this initialization parameter - * allows the clients to override. - */ - public static final String LEASE_TIME_KEY = "leaseTime"; - - /** - * The initialization parameter to configure the maximum number of messages - * to be leased from the database table at one time. If not configured - * then {@link #DEFAULT_MAXIMUM_LEASE_COUNT} is used. Specifying this - * initialization parameter allows clients to override. 
- */ - public static final String MAXIMUM_LEASE_COUNT_KEY = "maximumLeaseCount"; - - /** - * The initialization parameter to configure the maximum number of - * seconds to sleep when the database queue is found to be empty - * in order to avoid a busy loop of constant queries. The actual amount - * of time used for sleep will progressively increase as the message - * queue continues to be empty until it equals the configured maximum - * number of seconds. If not configured then {@link - * #DEFAULT_MAXIMUM_SLEEP_TIME} is used. Specifying this initialization - * parameter allows clients to override. - */ - public static final String MAXIMUM_SLEEP_TIME_KEY = "maximumSleepTime"; - - /** - * The default number of times to retry failed SQS requests before aborting - * consumption. The default value is {@value}. A different value can be set - * via the {#link #MAXIMUM_RETRIES_KEY} initialization parameter. - */ - public static final int DEFAULT_MAXIMUM_RETRIES = 0; - - /** - * The default number of milliseconds to wait before retrying the SQS request - * if the previous request failed. The default value is {@value}. A - * different value can be set via the {@link #RETRY_WAIT_TIME_KEY} - * initialization parameter. - */ - public static final long DEFAULT_RETRY_WAIT_TIME = 1000L; - - /** - * The default number of seconds to lease a message in the database table, - * preventing other consumers from obtaining it. The default value is {@value}. - * A different value can be set via the {@link #LEASE_TIME_KEY} initialization - * parameter. - */ - public static final int DEFAULT_LEASE_TIME = 1800; - - /** - * The default maximum number of messages to be leased from the database table - * at one time. The default value is {@value}. A different value can be set - * via the {@link #MAXIMUM_LEASE_COUNT_KEY} initialization parameter. 
- */ - public static final int DEFAULT_MAXIMUM_LEASE_COUNT = 100; - - /** - * The default maximum number of second to sleep when an empty queue is - * encountered in order to avoid a busy loop of querying the database. - * The actual amount of time used for sleep will progressively increase - * as the message queue continues to be empty until it equals the - * configured maximum number of seconds. The default value is {@value}. - * A different value can be set via the {@link #MAXIMUM_SLEEP_TIME_KEY} - * initialization parameter. - */ - public static final int DEFAULT_MAXIMUM_SLEEP_TIME = 10; - - /** - * Defined constant for one second in milliseconds. - */ - private static final long ONE_SECOND = 1000L; - - /** - * The {@link ConnectionProvider} to use for obtaining - * {@link Connection} instances. - */ - private ConnectionProvider connectionProvider; - - /** - * The {@link MessageQueue} for this instance. - */ - private MessageQueue messageQueue; - - /** - * The name for binding the {@link #messageQueue} in the {@link - * #MESSAGE_QUEUE_REGISTRY}. - */ - private String queueRegistryName = null; - - /** - * The {@link AccessToken} for unbinding the {@link #messageQueue} from the - * {@link #MESSAGE_QUEUE_REGISTRY}. - */ - private AccessToken registryToken = null; - - /** - * The {@link SQLClient} to use for interacting with the database. - */ - private SQLClient sqlClient; - - /** - * The consumption thread for this instance. - */ - private Thread consumptionThread = null; - - /** - * The maximum number of times to retry failed SQS requests before aborting - * consumption. - */ - private int maximumRetries = DEFAULT_MAXIMUM_RETRIES; - - /** - * The number of milliseconds to wait before retrying the SQS request if the - * previous request failed. - */ - private long retryWaitTime = DEFAULT_RETRY_WAIT_TIME; - - /** - * The configured number of seconds to lease messages from the database - * queue before they become available to other consumers. 
- */ - private int leaseTime = DEFAULT_LEASE_TIME; - - /** - * The configured maximum number of messages to lease at one time from the - * database queue. - */ - private int maximumLeaseCount = DEFAULT_MAXIMUM_LEASE_COUNT; - - /** - * The configured maximum number of second to sleep when an empty queue is - * encountered in order to avoid a busy loop of querying the database. - * The actual amount of time used for sleep will progressively increase - * as the message queue continues to be empty until it equals the - * configured maximum number of seconds. - */ - private int maximumSleepTime = DEFAULT_MAXIMUM_SLEEP_TIME; - - /** - * Private default constructor. - */ - public SQLConsumer() { - // do nothing - } - - /** - * Initializes the object. It sets the object up based on configuration - * passed in. - *

    - * The configuration is in JSON format: - * - *

    -   * {
    -   *   "connectionProvider": "<provider-registry-name>",
    -   *   "cleanDatabase": "<true|false>",
    -   *   "maximumRetries": "<retry-count>"
    -   *   "retryWaitTime": "<pause-milliseconds>",
    -   *   "leaseTime": "<lease-time-seconds>",
    -   *   "maximumLeaseCount": "<message-count>",
    -   *   "maximumSleepTime": "<sleep-time-seconds>"
    -   * }
    -   * 
    - * - * @param config Configuration string containing the needed information to - * connect to connect to the backing database to lease - * messages and consume them. - * - * @throws MessageConsumerSetupException If an initialization failure occurs. - */ - @Override - protected void doInit(JsonObject config) throws MessageConsumerSetupException { - try { - // check if we are cleaning the database - Boolean clean = getConfigBoolean(config, CLEAN_DATABASE_KEY, FALSE); - - // get the connection provider name - String providerKey = getConfigString(config, - CONNECTION_PROVIDER_KEY, - true); - - try { - this.connectionProvider = ConnectionProvider.REGISTRY.lookup(providerKey); - } catch (NameNotFoundException e) { - throw new MessageConsumerSetupException( - "No ConnectionProvider was registered to the name specified by the " - + "\"" + CONNECTION_PROVIDER_KEY + "\" initialization parameter: " - + providerKey); - } - - // get the failure threshold - this.maximumRetries = getConfigInteger(config, - MAXIMUM_RETRIES_KEY, - 0, - DEFAULT_MAXIMUM_RETRIES); - - // get the retry wait time - this.retryWaitTime = getConfigLong(config, - RETRY_WAIT_TIME_KEY, - 0L, - DEFAULT_RETRY_WAIT_TIME); - - // get the lease time - this.leaseTime = getConfigInteger(config, - LEASE_TIME_KEY, - 1, - DEFAULT_LEASE_TIME); - - // get the maximum lease count - this.maximumLeaseCount = getConfigInteger(config, - MAXIMUM_LEASE_COUNT_KEY, - 1, - DEFAULT_MAXIMUM_LEASE_COUNT); - - // get the maximum sleep time - this.maximumSleepTime = getConfigInteger(config, - MAXIMUM_SLEEP_TIME_KEY, - 1, - DEFAULT_MAXIMUM_SLEEP_TIME); - - // initialize the SQLClient - this.sqlClient = this.initSQLClient(); - - // initialize the message queue interface - this.messageQueue = this.initMessageQueue(); - - // ensure the schema exists - this.ensureSchema(clean); - - // optionally register the MessageQueue interface - this.queueRegistryName = getConfigString(config, - QUEUE_REGISTRY_NAME_KEY, - false); - - if 
(this.queueRegistryName != null) { - this.registryToken = MESSAGE_QUEUE_REGISTRY.bind( - this.queueRegistryName, this.messageQueue); - } - - } catch (Exception e) { - throw new MessageConsumerSetupException(e); + + /** + * Determines the {@link SQLClient} to use from the metadata obtained from the + * JDBC {@link Connection} via {@link #getConnection()} and returns the + * {@link SQLClient} instance. + * + * @return The {@link SQLClient} to use. + * + * @throws MessageConsumerSetupException If a failure occurs. + */ + protected SQLClient initSQLClient() throws MessageConsumerSetupException { + Connection conn = null; + try { + // get a connection + conn = this.getConnection(); + + // set the database type + DatabaseType databaseType = DatabaseType.detect(conn); + + // create the SQLClient instance + switch (databaseType) { + case POSTGRESQL: + return new PostgreSQLClient(); + case SQLITE: + return new SQLiteClient(); + default: + throw new MessageConsumerSetupException( + "The configured ConnectionProvider is associated with unsupported " + + "database type. databaseType=[ " + databaseType + " ]"); + } + + } catch (SQLException e) { + throw new MessageConsumerSetupException("Encountered a SQL failure during initialization.", e); + + } finally { + conn = close(conn); + } } - } - - /** - * Gets a JDBC {@link Connection} to use. Typically these are obtained from - * a backing pool so repeated calls to this function without closing the - * previously obtained {@link Connection} instances could exhaust the pool. - * This may block until a {@link Connection} is available. - * - * @return The {@link Connection} that was obtained. - * - * @throws SQLException If a JDBC failure occurs. 
- */ - protected Connection getConnection() throws SQLException { - return this.connectionProvider.getConnection(); - } - - /** - * Determines the {@link SQLClient} to use from the metadata obtained - * from the JDBC {@link Connection} via {@link #getConnection()} and - * returns the {@link SQLClient} instance. - * - * @return The {@link SQLClient} to use. - * - * @throws MessageConsumerSetupException If a failure occurs. - */ - protected SQLClient initSQLClient() throws MessageConsumerSetupException { - Connection conn = null; - try { - // get a connection - conn = this.getConnection(); - - // set the database type - DatabaseType databaseType = DatabaseType.detect(conn); - - // create the SQLClient instance - switch (databaseType) { - case POSTGRESQL: - return new PostgreSQLClient(); - case SQLITE: - return new SQLiteClient(); - default: - throw new MessageConsumerSetupException( - "The configured ConnectionProvider is associated with unsupported " - + "database type. databaseType=[ " + databaseType + " ]"); - } - - } catch (SQLException e) { - throw new MessageConsumerSetupException( - "Encountered a SQL failure during initialization.", e); - - } finally { - conn = close(conn); + + /** + * Gets the {@link SQLClient} used by this instance for interacting with the + * backing database. This returns null if the {@link SQLClient} has + * not yet been initialized. + * + * @return The {@link SQLClient} used by this instance for interacting with the + * backing database. + */ + protected SQLClient getSQLClient() { + return this.sqlClient; } - } - - /** - * Gets the {@link SQLClient} used by this instance for interacting with the - * backing database. This returns null if the {@link SQLClient} - * has not yet been initialized. - * - * @return The {@link SQLClient} used by this instance for interacting with - * the backing database. 
- */ - protected SQLClient getSQLClient() { - return this.sqlClient; - } - - /** - * Creates and initializes the {@link MessageQueue} instance to use with - * this {@link SQLConsumer}. - * - * @return The {@link MessageQueue} instance that was created to be used - * with this {@link SQLConsumer}. - */ - protected MessageQueue initMessageQueue() { - return new SimpleMessageQueue(this.getSQLClient()); - } - - /** - * Gets the {@link MessageQueue} interface for interacting with the - * backing message queue for this {@link SQLConsumer}. - * - * @return The {@link MessageQueue} interface for interacting with the - * backing message queue for this {@link SQLConsumer}. - */ - public MessageQueue getMessageQueue() { - return this.messageQueue; - } - - /** - * Ensures the schema exists and alternatively drops the existing the schema - * and recreates it. This is called from {@link #doInit(JsonObject)}. - * - * @param recreate true if the existing schema should be - * dropped, otherwise false. - * - * @throws SQLException If a failure occurs. - */ - protected void ensureSchema(boolean recreate) throws SQLException { - Connection conn = null; - try { - // get the connection - conn = this.getConnection(); - - // get the SQLClient - SQLClient sqlClient = this.getSQLClient(); - - // ensure the schema exists - sqlClient.ensureSchema(conn, recreate); - - } finally { - conn = close(conn); + + /** + * Creates and initializes the {@link MessageQueue} instance to use with this + * {@link SQLConsumer}. + * + * @return The {@link MessageQueue} instance that was created to be used with + * this {@link SQLConsumer}. + */ + protected MessageQueue initMessageQueue() { + return new SimpleMessageQueue(this.getSQLClient()); } - } - - /** - * Returns the maximum number times failed attempts to connect to the database - * will be retried before aborting message consumption. 
This defaults to {@link - * #DEFAULT_MAXIMUM_RETRIES} and can be configured via the - * {@link #MAXIMUM_RETRIES_KEY} configuration parameter. - * - * @return The maximum number of times failed attempts to connect to the - * database - * will be retried before aborting message consumption. - */ - public int getMaximumRetries() { - return this.maximumRetries; - } - - /** - * Returns the number of milliseconds to wait between database query retries - * when a failure occurs. This defaults to {@link #DEFAULT_RETRY_WAIT_TIME} - * and can be configured via the {@link #RETRY_WAIT_TIME_KEY} configuration - * parameter. - * - * @return The number of milliseconds to wait between database query retries - * when a failure occurs. - */ - public long getRetryWaitTime() { - return this.retryWaitTime; - } - - /** - * Gets the number of seconds messages will be leased from the - * database queue, preventing other processors from consuming those same - * messages until the lease has expired. - * - * @return The number of seconds messages will be leased from the - * database queue, preventing other processors from consuming those - * same messages until the lease has expired. - */ - public int getLeaseTime() { - return this.leaseTime; - } - - /** - * Gets the maximum number of messages to lease from the database queue - * at one time. - * - * @return The maximum number of messages to lease from the database - * queue at one time. - */ - public int getMaximumLeaseCount() { - return this.maximumLeaseCount; - } - - /** - * Gets the maximum number of seconds to sleep when an empty queue - * is encountered. The actual amount of time used for sleep will - * progressively increase as the message queue continues to be empty until - * it equals the configured maximum number of seconds. - * - * @return The maximum number of seconds to sleep when an empty queue - * is encountered. 
- */ - public int getMaximumSleepTime() { - return this.maximumSleepTime; - } - - /** - * Creates a virtually unique lease ID. - * - * @return A new lease ID to use. - */ - protected String generateLeaseId() { - long pid = ProcessHandle.current().pid(); - StringBuilder sb = new StringBuilder(); - sb.append(pid).append("|").append(Instant.now().toString()).append("|"); - sb.append(TextUtilities.randomAlphanumericText(50)); - return sb.toString(); - } - - /** - * Handles an SQL failure and checks if consumption should be aborted. - * - * @param failureCount The number of consecutive failures so far. - * @param failure The {@link Exception} that was thrown if available, - * otherwise null. - * @return true if consumption should abort, otherwise - * false. - */ - protected boolean handleFailure(int failureCount, Exception failure) { - // get the maximum number of retries - int maxRetries = this.getMaximumRetries(); - - logWarning(failure, - "FAILURE DETECTED: " + failureCount + " of " + maxRetries - + " consecutive failure(s)"); - - // check if we have exceeded the maximum failure count - if (failureCount > maxRetries) { - // return true to indicate that we should abort consumption - return true; - - } else { - // looks like we can retry - try { - Thread.sleep(this.getRetryWaitTime()); - } catch (InterruptedException ignore) { - // ignore the exception - } - return false; + + /** + * Gets the {@link MessageQueue} interface for interacting with the backing + * message queue for this {@link SQLConsumer}. + * + * @return The {@link MessageQueue} interface for interacting with the backing + * message queue for this {@link SQLConsumer}. + */ + public MessageQueue getMessageQueue() { + return this.messageQueue; } - } - - /** - * Implemented to launch a background thread that will read messages from - * the database queue and process them. - * - * @param processor Processes messages - * - * @throws MessageConsumerException If a failure occurs. 
- */ - @Override - protected void doConsume(MessageProcessor processor) - throws MessageConsumerException { - this.consumptionThread = new Thread(() -> { - int failureCount = 0; - long sleepTime = ONE_SECOND; - while (this.getState() == CONSUMING) { - // get the SQLClient - SQLClient sqlClient = this.getSQLClient(); - - // generate a lease ID - String leaseId = this.generateLeaseId(); - - // get the lease time (in seconds) - int leaseTime = this.getLeaseTime(); - - // get the maximum lease count - int maxLeaseCount = this.getMaximumLeaseCount(); - - // initialize the messages list - List messages = null; - - // initialize the connection - Connection conn = null; + /** + * Ensures the schema exists and alternatively drops the existing the schema and + * recreates it. This is called from {@link #doInit(JsonObject)}. + * + * @param recreate true if the existing schema should be dropped, + * otherwise false. + * + * @throws SQLException If a failure occurs. + */ + protected void ensureSchema(boolean recreate) throws SQLException { + Connection conn = null; try { - // get the connection - conn = this.getConnection(); + // get the connection + conn = this.getConnection(); + + // get the SQLClient + SQLClient sqlClient = this.getSQLClient(); + + // ensure the schema exists + sqlClient.ensureSchema(conn, recreate); + + } finally { + conn = close(conn); + } + } + + /** + * Returns the maximum number times failed attempts to connect to the database + * will be retried before aborting message consumption. This defaults to + * {@link #DEFAULT_MAXIMUM_RETRIES} and can be configured via the + * {@link #MAXIMUM_RETRIES_KEY} configuration parameter. + * + * @return The maximum number of times failed attempts to connect to the + * database will be retried before aborting message consumption. + */ + public int getMaximumRetries() { + return this.maximumRetries; + } + + /** + * Returns the number of milliseconds to wait between database query retries + * when a failure occurs. 
This defaults to {@link #DEFAULT_RETRY_WAIT_TIME} and + * can be configured via the {@link #RETRY_WAIT_TIME_KEY} configuration + * parameter. + * + * @return The number of milliseconds to wait between database query retries + * when a failure occurs. + */ + public long getRetryWaitTime() { + return this.retryWaitTime; + } + + /** + * Gets the number of seconds messages will be leased from the database + * queue, preventing other processors from consuming those same messages until + * the lease has expired. + * + * @return The number of seconds messages will be leased from the + * database queue, preventing other processors from consuming those same + * messages until the lease has expired. + */ + public int getLeaseTime() { + return this.leaseTime; + } + + /** + * Gets the maximum number of messages to lease from the database queue at one + * time. + * + * @return The maximum number of messages to lease from the database queue at + * one time. + */ + public int getMaximumLeaseCount() { + return this.maximumLeaseCount; + } - // first release any expired leases so we can lease those messages - int count = sqlClient.releaseExpiredLeases(conn, leaseTime); + /** + * Gets the maximum number of seconds to sleep when an empty queue is + * encountered. The actual amount of time used for sleep will progressively + * increase as the message queue continues to be empty until it equals the + * configured maximum number of seconds. + * + * @return The maximum number of seconds to sleep when an empty queue is + * encountered. + */ + public int getMaximumSleepTime() { + return this.maximumSleepTime; + } - // commit the transaction - conn.commit(); + /** + * Creates a virtually unique lease ID. + * + * @return A new lease ID to use. 
+ */ + protected String generateLeaseId() { + long pid = ProcessHandle.current().pid(); + StringBuilder sb = new StringBuilder(); + sb.append(pid).append("|").append(Instant.now().toString()).append("|"); + sb.append(TextUtilities.randomAlphanumericText(50)); + return sb.toString(); + } - if (count > 0) { - logInfo("expired leases on " + count + " messages"); - } + /** + * Handles an SQL failure and checks if consumption should be aborted. + * + * @param failureCount The number of consecutive failures so far. + * @param failure The {@link Exception} that was thrown if available, + * otherwise null. + * @return true if consumption should abort, otherwise + * false. + */ + protected boolean handleFailure(int failureCount, Exception failure) { + // get the maximum number of retries + int maxRetries = this.getMaximumRetries(); - // lease messages - count = sqlClient.leaseMessages( - conn, leaseId, leaseTime, maxLeaseCount); + logWarning(failure, "FAILURE DETECTED: " + failureCount + " of " + maxRetries + " consecutive failure(s)"); - // commit and close the connection so the leases are marked - // and the connection is available - conn.commit(); - conn = close(conn); + // check if we have exceeded the maximum failure count + if (failureCount > maxRetries) { + // return true to indicate that we should abort consumption + return true; - // check if we have an empty queue - if (count == 0) { - failureCount = 0; + } else { + // looks like we can retry try { - Thread.sleep(sleepTime); + Thread.sleep(this.getRetryWaitTime()); } catch (InterruptedException ignore) { - // do nothing + // ignore the exception } - sleepTime = sleepTime * 2L; - long maxSleepTime = ONE_SECOND * ((long) this.getMaximumSleepTime()); + return false; + } + } - if (sleepTime > maxSleepTime) { - sleepTime = maxSleepTime; + /** + * Implemented to launch a background thread that will read messages from the + * database queue and process them. 
+ * + * @param processor Processes messages + * + * @throws MessageConsumerException If a failure occurs. + */ + @Override + protected void doConsume(MessageProcessor processor) throws MessageConsumerException { + this.consumptionThread = new Thread(() -> { + int failureCount = 0; + long sleepTime = ONE_SECOND; + while (this.getState() == CONSUMING) { + // get the SQLClient + SQLClient sqlClient = this.getSQLClient(); + + // generate a lease ID + String leaseId = this.generateLeaseId(); + + // get the lease time (in seconds) + int leaseTime = this.getLeaseTime(); + + // get the maximum lease count + int maxLeaseCount = this.getMaximumLeaseCount(); + + // initialize the messages list + List messages = null; + + // initialize the connection + Connection conn = null; + + try { + // get the connection + conn = this.getConnection(); + + // first release any expired leases so we can lease those messages + int count = sqlClient.releaseExpiredLeases(conn, leaseTime); + + // commit the transaction + conn.commit(); + + if (count > 0) { + logInfo("expired leases on " + count + " messages"); + } + + // lease messages + count = sqlClient.leaseMessages(conn, leaseId, leaseTime, maxLeaseCount); + + // commit and close the connection so the leases are marked + // and the connection is available + conn.commit(); + conn = close(conn); + + // check if we have an empty queue + if (count == 0) { + failureCount = 0; + try { + Thread.sleep(sleepTime); + } catch (InterruptedException ignore) { + // do nothing + } + sleepTime = sleepTime * 2L; + long maxSleepTime = ONE_SECOND * ((long) this.getMaximumSleepTime()); + + if (sleepTime > maxSleepTime) { + sleepTime = maxSleepTime; + } + + // try again + continue; + } + + // we got a non-empty queue so restore the sleep time to one second + sleepTime = ONE_SECOND; + + // get the connection + conn = this.getConnection(); + + // get the list of leased messages + messages = sqlClient.getLeasedMessages(conn, leaseId); + + // close the connection + 
conn = close(conn); + + } catch (SQLException e) { + if (this.handleFailure(++failureCount, e)) { + // destroy and then return to abort consumption + this.destroy(); + return; + + } else { + // let's retry + continue; + } + } finally { + // close the connection + conn = close(conn); + } + + // if we get here then we have leased messages without a failure + // so we reset the failure count + failureCount = 0; + + // get the messages from the response + for (LeasedMessage message : messages) { + // enqueue the next message for processing -- this call may wait + // for enough room in the queue for the messages to be enqueued + this.enqueueMessages(processor, message); + } } + }); + + // start the thread + this.consumptionThread.start(); + } - // try again - continue; - } + /** + * {@inheritDoc} + *

    + * Overridden to renew the lease when a message is dequeued. + *

    + */ + @Override + protected synchronized InfoMessage dequeueMessage(MessageProcessor processor) { + // get the message + InfoMessage message = super.dequeueMessage(processor); + + // check if we got a message and renew its lease + if (message != null) { + Connection conn = null; + try { + // get a connection + conn = this.getConnection(); - // we got a non-empty queue so restore the sleep time to one second - sleepTime = ONE_SECOND; + // get the SQLClient + SQLClient sqlClient = this.getSQLClient(); - // get the connection - conn = this.getConnection(); + // get the lease time + int leaseTime = this.getLeaseTime(); - // get the list of leased messages - messages = sqlClient.getLeasedMessages(conn, leaseId); + // get the leased message + LeasedMessage leasedMessage = message.getBatch().getMessage(); - // close the connection - conn = close(conn); + // renew the lease + sqlClient.renewLease(conn, leasedMessage, leaseTime); - } catch (SQLException e) { - if (this.handleFailure(++failureCount, e)) { - // destroy and then return to abort consumption - this.destroy(); - return; - - } else { - // let's retry - continue; - } - } finally { - // close the connection - conn = close(conn); - } + // commit the connection + conn.commit(); - // if we get here then we have leased messages without a failure - // so we reset the failure count - failureCount = 0; + } catch (SQLException e) { + logWarning(e, "Ignoring exception while renewing message lease:", message); - // get the messages from the response - for (LeasedMessage message : messages) { - // enqueue the next message for processing -- this call may wait - // for enough room in the queue for the messages to be enqueued - this.enqueueMessages(processor, message); + } finally { + conn = close(conn); + } } - } - }); - - // start the thread - this.consumptionThread.start(); - } - - /** - * {@inheritDoc} - *

    - * Overridden to renew the lease when a message is dequeued. - *

    - */ - @Override - protected synchronized InfoMessage dequeueMessage( - MessageProcessor processor) { - // get the message - InfoMessage message = super.dequeueMessage(processor); - - // check if we got a message and renew its lease - if (message != null) { - Connection conn = null; - try { - // get a connection - conn = this.getConnection(); - - // get the SQLClient - SQLClient sqlClient = this.getSQLClient(); - - // get the lease time - int leaseTime = this.getLeaseTime(); - - // get the leased message - LeasedMessage leasedMessage = message.getBatch().getMessage(); - - // renew the lease - sqlClient.renewLease(conn, leasedMessage, leaseTime); - - // commit the connection - conn.commit(); - - } catch (SQLException e) { - logWarning(e, "Ignoring exception while renewing message lease:", message); - - } finally { - conn = close(conn); - } + + // return the message + return message; } - // return the message - return message; - } - - /** - * {@inheritDoc} - */ - @Override - protected String extractMessageBody(LeasedMessage message) { - return message.getMessageText(); - } - - /** - * {@inheritDoc} - */ - @Override - protected void disposeMessage(LeasedMessage message) { - Connection conn = null; - try { - // get the connection - conn = this.getConnection(); - - // get the SQLClient - SQLClient sqlClient = this.getSQLClient(); - - // get the message ID and lease ID - long messageId = message.getMessageId(); - String leaseId = message.getLeaseId(); - - // delete the message - sqlClient.deleteMessage(conn, messageId, leaseId); - - // commit the transaction - conn.commit(); - - } catch (Exception e) { - logWarning(e, "Ignoring exception while acknowledging message:", message); - - } finally { - conn = close(conn); + /** + * {@inheritDoc} + */ + @Override + protected String extractMessageBody(LeasedMessage message) { + return message.getMessageText(); } - } - - /** - * {@inheritDoc} - */ - @Override - protected void doDestroy() { - // join to the consumption thread - 
try { - this.consumptionThread.join(); - synchronized (this) { - this.consumptionThread = null; - } - - // unregister the the message queue if registered - if (this.registryToken != null && this.queueRegistryName != null - && MESSAGE_QUEUE_REGISTRY.isBound(this.queueRegistryName)) { + + /** + * {@inheritDoc} + */ + @Override + protected void disposeMessage(LeasedMessage message) { + Connection conn = null; try { - MESSAGE_QUEUE_REGISTRY.unbind(this.queueRegistryName, - this.registryToken); - this.registryToken = null; - this.queueRegistryName = null; + // get the connection + conn = this.getConnection(); + + // get the SQLClient + SQLClient sqlClient = this.getSQLClient(); + + // get the message ID and lease ID + long messageId = message.getMessageId(); + String leaseId = message.getLeaseId(); + + // delete the message + sqlClient.deleteMessage(conn, messageId, leaseId); + + // commit the transaction + conn.commit(); + } catch (Exception e) { - logWarning(e, "Ignoring exception while unbinding MessageQueue."); - } - } + logWarning(e, "Ignoring exception while acknowledging message:", message); - } catch (InterruptedException ignore) { - // ignore + } finally { + conn = close(conn); + } } - } - - /** - * Provides a means to test this class from the command-line. - * - * @param args The command-line arguments. 
- */ - public static void main(String[] args) { - // check if no arguments specified - if (args.length != 1 && args.length != 2 && args.length != 6) { - System.err.println("Unexpected number of command-line arguments."); - printUsage(); - System.exit(1); + + /** + * {@inheritDoc} + */ + @Override + protected void doDestroy() { + // join to the consumption thread + try { + this.consumptionThread.join(); + synchronized (this) { + this.consumptionThread = null; + } + + // unregister the the message queue if registered + if (this.registryToken != null && this.queueRegistryName != null + && MESSAGE_QUEUE_REGISTRY.isBound(this.queueRegistryName)) { + try { + MESSAGE_QUEUE_REGISTRY.unbind(this.queueRegistryName, this.registryToken); + this.registryToken = null; + this.queueRegistryName = null; + } catch (Exception e) { + logWarning(e, "Ignoring exception while unbinding MessageQueue."); + } + } + + } catch (InterruptedException ignore) { + // ignore + } } - try { - DatabaseType dbType = DatabaseType.valueOf(args[0]); - - Connector connector = null; - int minPoolSize = 1; - int maxPoolSize = 1; - switch (dbType) { - case SQLITE: - if (args.length == 1) { - SQLiteConnector conn = new SQLiteConnector(); - File file = conn.getSqliteFile(); - System.out.println("SQLite File: " + file); - connector = conn; - - } else if (args.length == 2) { - connector = new SQLiteConnector(args[1]); - } else { - System.err.println("Unexpected number of command-line arguments."); - printUsage(); - System.exit(1); - } - break; - case POSTGRESQL: - if (args.length != 6) { + /** + * Provides a means to test this class from the command-line. + * + * @param args The command-line arguments. 
+ */ + public static void main(String[] args) { + // check if no arguments specified + if (args.length != 1 && args.length != 2 && args.length != 6) { System.err.println("Unexpected number of command-line arguments."); printUsage(); System.exit(1); - } - String host = args[1]; - int port = Integer.parseInt(args[2]); - String database = args[3]; - String user = args[4]; - String password = args[5]; - - connector = new PostgreSqlConnector( - host, port, database, user, password); - minPoolSize = 2; - maxPoolSize = 5; - - break; - default: - System.err.println("Unsupported database type: " + dbType); - printUsage(); - System.exit(1); - break; - } - - ConnectionPool pool = new ConnectionPool(connector, minPoolSize, maxPoolSize); - - ConnectionProvider provider = new PoolConnectionProvider(pool); - - ConnectionProvider.REGISTRY.bind("test-provider", provider); - - JsonObjectBuilder builder = Json.createObjectBuilder(); - builder.add(CLEAN_DATABASE_KEY, false); - builder.add(CONNECTION_PROVIDER_KEY, "test-provider"); - builder.add(QUEUE_REGISTRY_NAME_KEY, "message-queue"); - - JsonObject config = builder.build(); - - SQLConsumer consumer = new SQLConsumer(); - consumer.init(config); - - consumer.consume((jsonMessage) -> { - String recordId = jsonMessage.getString("RECORD_ID"); - JsonArray array = jsonMessage.getJsonArray("AFFECTED_ENTITIES"); - for (JsonObject obj : array.getValuesAs(JsonObject.class)) { - long entityId = obj.getJsonNumber("ENTITY_ID").longValue(); - - System.out.println(); - System.out.println( - "ENTITY ID: " + entityId + " / RECORD ID: " + recordId); } - }); - MessageQueue messageQueue = SQLConsumer.MESSAGE_QUEUE_REGISTRY.lookup("message-queue"); + try { + DatabaseType dbType = DatabaseType.valueOf(args[0]); + + Connector connector = null; + int minPoolSize = 1; + int maxPoolSize = 1; + switch (dbType) { + case SQLITE: + if (args.length == 1) { + SQLiteConnector conn = new SQLiteConnector(); + File file = conn.getSqliteFile(); + 
System.out.println("SQLite File: " + file); + connector = conn; + + } else if (args.length == 2) { + connector = new SQLiteConnector(args[1]); + } else { + System.err.println("Unexpected number of command-line arguments."); + printUsage(); + System.exit(1); + } + break; + case POSTGRESQL: + if (args.length != 6) { + System.err.println("Unexpected number of command-line arguments."); + printUsage(); + System.exit(1); + } + String host = args[1]; + int port = Integer.parseInt(args[2]); + String database = args[3]; + String user = args[4]; + String password = args[5]; + + connector = new PostgreSqlConnector(host, port, database, user, password); + minPoolSize = 2; + maxPoolSize = 5; + + break; + default: + System.err.println("Unsupported database type: " + dbType); + printUsage(); + System.exit(1); + break; + } + + ConnectionPool pool = new ConnectionPool(connector, minPoolSize, maxPoolSize); - int entityId = 10; - int recordId = 100000; - int messageCount = 0; - for (int index1 = 0; index1 < 100; index1++) { - JsonArrayBuilder jab = Json.createArrayBuilder(); - for (int index2 = 0; index2 < 15; index2++) { - JsonObjectBuilder job = Json.createObjectBuilder(); - job.add("DATA_SOURCE", "CUSTOMERS"); - job.add("RECORD_ID", String.valueOf(recordId++)); + ConnectionProvider provider = new PoolConnectionProvider(pool); - JsonArrayBuilder jab2 = Json.createArrayBuilder(); + ConnectionProvider.REGISTRY.bind("test-provider", provider); - JsonObjectBuilder job2 = Json.createObjectBuilder(); - job2.add("ENTITY_ID", entityId++); - jab2.add(job2); - job.add("AFFECTED_ENTITIES", jab2); + JsonObjectBuilder builder = Json.createObjectBuilder(); + builder.add(CLEAN_DATABASE_KEY, false); + builder.add(CONNECTION_PROVIDER_KEY, "test-provider"); + builder.add(QUEUE_REGISTRY_NAME_KEY, "message-queue"); - jab.add(job); - } - String message = JsonUtilities.toJsonText(jab); - messageQueue.enqueueMessage(message); - messageCount++; - } - - System.out.println(); - System.out.println("ENQUEUED 
" + messageCount + " MESSAGES"); - - // wait until the queue is empty - for (int index = 0; !messageQueue.isEmpty(); index++) { - Thread.sleep(1000L); - if (index % 10 == 0) { - java.util.Map statistics = consumer.getStatistics(); - System.out.println(); - System.out.println("------------------------------"); - statistics.forEach((stat, value) -> { - System.out.println( - stat.getName() + " : " + value + " " + stat.getUnits()); - }); - System.out.println(); - System.out.println("QUEUE SIZE : " + messageQueue.getMessageCount()); - System.out.println("------------------------------"); - } - } + JsonObject config = builder.build(); + + SQLConsumer consumer = new SQLConsumer(); + consumer.init(config); + + consumer.consume((jsonMessage) -> { + String recordId = jsonMessage.getString("RECORD_ID"); + JsonArray array = jsonMessage.getJsonArray("AFFECTED_ENTITIES"); + for (JsonObject obj : array.getValuesAs(JsonObject.class)) { + long entityId = obj.getJsonNumber("ENTITY_ID").longValue(); + + System.out.println(); + System.out.println("ENTITY ID: " + entityId + " / RECORD ID: " + recordId); + } + }); + + MessageQueue messageQueue = SQLConsumer.MESSAGE_QUEUE_REGISTRY.lookup("message-queue"); - System.out.println(); - System.out.println("QUEUE EMPTY : " + messageQueue.isEmpty()); - System.out.println("QUEUE SIZE : " + messageQueue.getMessageCount()); + int entityId = 10; + int recordId = 100000; + int messageCount = 0; + for (int index1 = 0; index1 < 100; index1++) { + JsonArrayBuilder jab = Json.createArrayBuilder(); + for (int index2 = 0; index2 < 15; index2++) { + JsonObjectBuilder job = Json.createObjectBuilder(); + job.add("DATA_SOURCE", "CUSTOMERS"); + job.add("RECORD_ID", String.valueOf(recordId++)); - // destroy the consumer - consumer.destroy(); - pool.shutdown(); + JsonArrayBuilder jab2 = Json.createArrayBuilder(); - } catch (Exception e) { - e.printStackTrace(); - printUsage(); - System.exit(1); + JsonObjectBuilder job2 = Json.createObjectBuilder(); + 
job2.add("ENTITY_ID", entityId++); + jab2.add(job2); + job.add("AFFECTED_ENTITIES", jab2); + + jab.add(job); + } + String message = JsonUtilities.toJsonText(jab); + messageQueue.enqueueMessage(message); + messageCount++; + } + + System.out.println(); + System.out.println("ENQUEUED " + messageCount + " MESSAGES"); + + // wait until the queue is empty + for (int index = 0; !messageQueue.isEmpty(); index++) { + Thread.sleep(1000L); + if (index % 10 == 0) { + java.util.Map statistics = consumer.getStatistics(); + System.out.println(); + System.out.println("------------------------------"); + statistics.forEach((stat, value) -> { + System.out.println(stat.getName() + " : " + value + " " + stat.getUnits()); + }); + System.out.println(); + System.out.println("QUEUE SIZE : " + messageQueue.getMessageCount()); + System.out.println("------------------------------"); + } + } + + System.out.println(); + System.out.println("QUEUE EMPTY : " + messageQueue.isEmpty()); + System.out.println("QUEUE SIZE : " + messageQueue.getMessageCount()); + + // destroy the consumer + consumer.destroy(); + pool.shutdown(); + + } catch (Exception e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + printUsage(); + System.exit(1); + } + } + + /** + * + */ + private static void printUsage() { + System.err.println(); + System.err.println("COMMAND-LINE ARGUMENT OPTIONS: "); + System.err.println(" - For SQLite with an auto-created temporary file:"); + System.err.println(" SQLITE"); + System.err.println(" - For SQLite with a specific database file:"); + System.err.println(" SQLITE "); + System.err.println(" - For PostrgreSQL:"); + System.err.println(" POSTGRESQL "); + System.err.println(); } - } - - /** - * - */ - private static void printUsage() { - System.err.println(); - System.err.println("COMMAND-LINE ARGUMENT OPTIONS: "); - System.err.println( - " - For SQLite with an auto-created temporary file:"); - System.err.println( - " SQLITE"); - 
System.err.println( - " - For SQLite with a specific database file:"); - System.err.println( - " SQLITE "); - System.err.println( - " - For PostrgreSQL:"); - System.err.println( - " POSTGRESQL "); - System.err.println(); - } } diff --git a/src/main/java/com/senzing/listener/communication/sqs/SQSConsumer.java b/src/main/java/com/senzing/listener/communication/sqs/SQSConsumer.java index aa3c13c..52b11d6 100755 --- a/src/main/java/com/senzing/listener/communication/sqs/SQSConsumer.java +++ b/src/main/java/com/senzing/listener/communication/sqs/SQSConsumer.java @@ -21,349 +21,327 @@ * A consumer for SQS. */ public class SQSConsumer extends AbstractMessageConsumer { - /** - * The initialization parameter for the SQS URL. There is no default value - * so this configuration parameter is required. - */ - public static final String SQS_URL_KEY = "sqsUrl"; - - /** - * The initialization parameter to configure the maximum number of times to - * retry a failed SQS request before aborting consumption. - */ - public static final String MAXIMUM_RETRIES_KEY = "maximumRetries"; - - /** - * The initialization parameter to configure the number of milliseconds to - * wait to retry when a failure occurs. This is only matters if the - * configured {@linkplain #MAXIMUM_RETRIES_KEY failure threshold} is - * greater than one (1). - */ - public static final String RETRY_WAIT_TIME_KEY = "retryWaitTime"; - - /** - * The initialization parameter to configure the number of seconds - * messages on the SQS queue are hidden from subsequent retrieve requests - * after having been retrieved. If not configured then the value configured - * on the queue itself is used. Specifying this initialization parameter - * allows the client to override. - */ - public static final String VISIBILITY_TIMEOUT_KEY = "visibilityTimeout"; - - /** - * The default number of times to retry failed SQS requests before aborting - * consumption. The default value is {@value}. 
A different value can be set - * via the {#link #MAXIMUM_RETRIES_KEY} parameter. - */ - public static final int DEFAULT_MAXIMUM_RETRIES = 0; - - /** - * The default number of milliseconds to wait before retrying the SQS request - * if the previous request failed. The default value is {@value}. A - * different value can be set via the {@link #RETRY_WAIT_TIME_KEY} parameter. - */ - public static final long DEFAULT_RETRY_WAIT_TIME = 1000L; - - /** - * The SQS URL. - */ - private String sqsUrl; - - /** - * The {@link SqsClient} for the connection to SQS. - */ - private SqsClient sqsClient; - - /** - * The consumption thread for this instance. - */ - private Thread consumptionThread = null; - - /** - * The maximum number of times to retry failed SQS requests before aborting - * consumption. - */ - private int maximumRetries = DEFAULT_MAXIMUM_RETRIES; - - /** - * The number of milliseconds to wait before retrying the SQS request if the - * previous request failed. - */ - private long retryWaitTime = DEFAULT_RETRY_WAIT_TIME; - - /** - * The configured visibility timeout or null if the queue's - * configured value should be used. - */ - private Integer visibilityTimeout = null; - - /** - * Wait parameter in seconds to SQS in case no messages are waiting to be collected. - */ - private static final int SQS_WAIT_SECS = 10; - - /** - * Generates a SQS consumer. - * - * @return The created {@link SQSConsumer} instance. - */ - public static SQSConsumer generateSQSConsumer() { - return new SQSConsumer(); - } - - /** - * Private default constructor. - */ - public SQSConsumer() { - // do nothing - } - - /** - * Initializes the object. It sets the object up based on configuration - * passed in. - *

    - * The configuration is in JSON format: - *

    -   * {
    -   *   "sqsUrl": "<URL>",
    -   *   "concurrency": "<thread-count>",
    -   *   "failureThreshold": "<failure-threshold>",
    -   *   "retryWaitTime": "<pause-milliseconds>",
    -   *   "visibilityTimeout": "<timeout-seconds>"
    -   * }
    -   * 
    - * - * @param config Configuration string containing the needed information to - * connect to SQS. - * - * @throws MessageConsumerSetupException If an initialization failure occurs. - */ - @Override - protected void doInit(JsonObject config) throws MessageConsumerSetupException - { - try { - // get the SQS URL - this.sqsUrl = getConfigString(config, SQS_URL_KEY, true); - - // get the failure threshold - this.maximumRetries = getConfigInteger(config, - MAXIMUM_RETRIES_KEY, - 0, - DEFAULT_MAXIMUM_RETRIES); - - // get the retry wait time - this.retryWaitTime = getConfigLong(config, - RETRY_WAIT_TIME_KEY, - 0L, - DEFAULT_RETRY_WAIT_TIME); - - // get the visibility timeout - this.visibilityTimeout = getConfigInteger(config, - VISIBILITY_TIMEOUT_KEY, - 1, - null); - - this.sqsClient = SqsClient.builder().build(); - - } catch (RuntimeException e) { - throw new MessageConsumerSetupException(e); + /** + * The initialization parameter for the SQS URL. There is no default value so + * this configuration parameter is required. + */ + public static final String SQS_URL_KEY = "sqsUrl"; + + /** + * The initialization parameter to configure the maximum number of times to + * retry a failed SQS request before aborting consumption. + */ + public static final String MAXIMUM_RETRIES_KEY = "maximumRetries"; + + /** + * The initialization parameter to configure the number of milliseconds to wait + * to retry when a failure occurs. This is only matters if the configured + * {@linkplain #MAXIMUM_RETRIES_KEY failure threshold} is greater than one (1). + */ + public static final String RETRY_WAIT_TIME_KEY = "retryWaitTime"; + + /** + * The initialization parameter to configure the number of seconds + * messages on the SQS queue are hidden from subsequent retrieve requests after + * having been retrieved. If not configured then the value configured on the + * queue itself is used. Specifying this initialization parameter allows the + * client to override. 
+ */ + public static final String VISIBILITY_TIMEOUT_KEY = "visibilityTimeout"; + + /** + * The default number of times to retry failed SQS requests before aborting + * consumption. The default value is {@value}. A different value can be set via + * the {#link #MAXIMUM_RETRIES_KEY} parameter. + */ + public static final int DEFAULT_MAXIMUM_RETRIES = 0; + + /** + * The default number of milliseconds to wait before retrying the SQS request if + * the previous request failed. The default value is {@value}. A different value + * can be set via the {@link #RETRY_WAIT_TIME_KEY} parameter. + */ + public static final long DEFAULT_RETRY_WAIT_TIME = 1000L; + + /** + * The SQS URL. + */ + private String sqsUrl; + + /** + * The {@link SqsClient} for the connection to SQS. + */ + private SqsClient sqsClient; + + /** + * The consumption thread for this instance. + */ + private Thread consumptionThread = null; + + /** + * The maximum number of times to retry failed SQS requests before aborting + * consumption. + */ + private int maximumRetries = DEFAULT_MAXIMUM_RETRIES; + + /** + * The number of milliseconds to wait before retrying the SQS request if the + * previous request failed. + */ + private long retryWaitTime = DEFAULT_RETRY_WAIT_TIME; + + /** + * The configured visibility timeout or null if the queue's + * configured value should be used. + */ + private Integer visibilityTimeout = null; + + /** + * Wait parameter in seconds to SQS in case no messages are waiting to be + * collected. + */ + private static final int SQS_WAIT_SECS = 10; + + /** + * Generates a SQS consumer. + * + * @return The created {@link SQSConsumer} instance. + */ + public static SQSConsumer generateSQSConsumer() { + return new SQSConsumer(); } - } - - /** - * Returns the maximum number times failed SQS requests will be retried before - * aborting message consumption. 
This defaults to {@link - * #DEFAULT_MAXIMUM_RETRIES} and can be configured via the - * {@link #MAXIMUM_RETRIES_KEY} configuration parameter. - * - * @return The maximum number of times failed SQS requests will be retried - * before aborting message consumption. - */ - public int getMaximumRetries() { - return this.maximumRetries; - } - - /** - * Returns the number of milliseconds to wait between SQS request retries - * when a failure occurs. This defaults to {@link #DEFAULT_RETRY_WAIT_TIME} - * and can be configured via the {@link #RETRY_WAIT_TIME_KEY} configuration - * parameter. - * - * @return The number of milliseconds to wait between SQS request retries - * when a failure occurs. - */ - public long getRetryWaitTime() { - return this.retryWaitTime; - } - - /** - * Returns the number of seconds messages on the SQS queue are hidden - * from subsequent retrieve requests after having been retrieved. If this - * returns null then the value configured on the queue itself - * is used. - * - * @return The number of seconds messages on the SQS queue are hidden - * from subsequent retrieve requests after having been retrieved, or - * null if the queue's configured value is used. - */ - public Integer getVisibilityTimeout() { return this.visibilityTimeout; } - - /** - * Returns the configured SQS URL. - * - * @return The configured SQS URL. - */ - public String getSqsUrl() { - return this.sqsUrl; - } - - /** - * Handles an SQS failure and checks if consumption should be aborted. - * - * @param failureCount The number of consecutive failures so far. - * @param response The SQS response, or null if not known. - * @param failure The {@link Exception} that was thrown if available, - * otherwise null. - * @return true if consumption should abort, otherwise - * false. 
- */ - protected boolean handleFailure(int failureCount, - ReceiveMessageResponse response, - Exception failure) - { - // get the maximum number of retries - int maxRetries = this.getMaximumRetries(); - - logWarning(failure, - "FAILURE DETECTED: " + failureCount + " of " + maxRetries - + " consecutive failure(s)", - ((response != null) - ? ("Received SQS HTTP error response code: " - + response.sdkHttpResponse().statusCode() - + " / " + response.sdkHttpResponse().statusText()) - : "*** No HTTP Response ***"), - "SQS URL: " + this.getSqsUrl()); - - // check if we have exceeded the maximum failure count - if (failureCount > maxRetries) { - // return true to indicate that we should abort consumption - return true; - - } else { - // looks like we can retry - try { - Thread.sleep(this.getRetryWaitTime()); - } catch (InterruptedException ignore) { - // ignore the exception - } - return false; + + /** + * Private default constructor. + */ + public SQSConsumer() { + // do nothing } - } - - /** - * Sets up a SQS consumer and then receives messages from SQS and - * feeds to service. - * - * @param processor Processes messages - * - * @throws MessageConsumerException If a failure occurs. - */ - @Override - protected void doConsume(MessageProcessor processor) - throws MessageConsumerException - { - this.consumptionThread = new Thread(() -> { - int failureCount = 0; - while (this.getState() == CONSUMING) { + + /** + * Initializes the object. It sets the object up based on configuration passed + * in. + *

    + * The configuration is in JSON format: + * + *

    +     * {
    +     *   "sqsUrl": "<URL>",
    +     *   "concurrency": "<thread-count>",
    +     *   "failureThreshold": "<failure-threshold>",
    +     *   "retryWaitTime": "<pause-milliseconds>",
    +     *   "visibilityTimeout": "<timeout-seconds>"
    +     * }
    +     * 
    + * + * @param config Configuration string containing the needed information to + * connect to SQS. + * + * @throws MessageConsumerSetupException If an initialization failure occurs. + */ + @Override + protected void doInit(JsonObject config) throws MessageConsumerSetupException { try { - ReceiveMessageRequest request = ReceiveMessageRequest.builder() - .queueUrl(this.getSqsUrl()) - .waitTimeSeconds(SQS_WAIT_SECS) - .visibilityTimeout(this.getVisibilityTimeout()) - .build(); - - ReceiveMessageResponse response = sqsClient.receiveMessage(request); - - // failed obtaining a response - if (!response.sdkHttpResponse().isSuccessful()) { - int responseCode = response.sdkHttpResponse().statusCode(); - if (this.handleFailure(++failureCount, response, null)) { - // destroy and then return to abort consumption - this.destroy(); - return; - - } else { - // let's retry - continue; + // get the SQS URL + this.sqsUrl = getConfigString(config, SQS_URL_KEY, true); + + // get the failure threshold + this.maximumRetries = getConfigInteger(config, MAXIMUM_RETRIES_KEY, 0, DEFAULT_MAXIMUM_RETRIES); + + // get the retry wait time + this.retryWaitTime = getConfigLong(config, RETRY_WAIT_TIME_KEY, 0L, DEFAULT_RETRY_WAIT_TIME); + + // get the visibility timeout + this.visibilityTimeout = getConfigInteger(config, VISIBILITY_TIMEOUT_KEY, 1, null); + + this.sqsClient = SqsClient.builder().build(); + + } catch (RuntimeException e) { + throw new MessageConsumerSetupException(e); + } + } + + /** + * Returns the maximum number times failed SQS requests will be retried before + * aborting message consumption. This defaults to + * {@link #DEFAULT_MAXIMUM_RETRIES} and can be configured via the + * {@link #MAXIMUM_RETRIES_KEY} configuration parameter. + * + * @return The maximum number of times failed SQS requests will be retried + * before aborting message consumption. 
+ */ + public int getMaximumRetries() { + return this.maximumRetries; + } + + /** + * Returns the number of milliseconds to wait between SQS request retries when a + * failure occurs. This defaults to {@link #DEFAULT_RETRY_WAIT_TIME} and can be + * configured via the {@link #RETRY_WAIT_TIME_KEY} configuration parameter. + * + * @return The number of milliseconds to wait between SQS request retries when a + * failure occurs. + */ + public long getRetryWaitTime() { + return this.retryWaitTime; + } + + /** + * Returns the number of seconds messages on the SQS queue are hidden + * from subsequent retrieve requests after having been retrieved. If this + * returns null then the value configured on the queue itself is + * used. + * + * @return The number of seconds messages on the SQS queue are hidden + * from subsequent retrieve requests after having been retrieved, or + * null if the queue's configured value is used. + */ + public Integer getVisibilityTimeout() { + return this.visibilityTimeout; + } + + /** + * Returns the configured SQS URL. + * + * @return The configured SQS URL. + */ + public String getSqsUrl() { + return this.sqsUrl; + } + + /** + * Handles an SQS failure and checks if consumption should be aborted. + * + * @param failureCount The number of consecutive failures so far. + * @param response The SQS response, or null if not known. + * @param failure The {@link Exception} that was thrown if available, + * otherwise null. + * @return true if consumption should abort, otherwise + * false. + */ + protected boolean handleFailure(int failureCount, ReceiveMessageResponse response, Exception failure) { + // get the maximum number of retries + int maxRetries = this.getMaximumRetries(); + + logWarning(failure, "FAILURE DETECTED: " + failureCount + " of " + maxRetries + " consecutive failure(s)", + ((response != null) + ? 
("Received SQS HTTP error response code: " + response.sdkHttpResponse().statusCode() + " / " + + response.sdkHttpResponse().statusText()) + : "*** No HTTP Response ***"), + "SQS URL: " + this.getSqsUrl()); + + // check if we have exceeded the maximum failure count + if (failureCount > maxRetries) { + // return true to indicate that we should abort consumption + return true; + + } else { + // looks like we can retry + try { + Thread.sleep(this.getRetryWaitTime()); + } catch (InterruptedException ignore) { + // ignore the exception + } + return false; + } + } + + /** + * Sets up a SQS consumer and then receives messages from SQS and feeds to + * service. + * + * @param processor Processes messages + * + * @throws MessageConsumerException If a failure occurs. + */ + @Override + protected void doConsume(MessageProcessor processor) throws MessageConsumerException { + this.consumptionThread = new Thread(() -> { + int failureCount = 0; + while (this.getState() == CONSUMING) { + try { + ReceiveMessageRequest request = ReceiveMessageRequest.builder().queueUrl(this.getSqsUrl()) + .waitTimeSeconds(SQS_WAIT_SECS).visibilityTimeout(this.getVisibilityTimeout()).build(); + + ReceiveMessageResponse response = sqsClient.receiveMessage(request); + + // failed obtaining a response + if (!response.sdkHttpResponse().isSuccessful()) { + int responseCode = response.sdkHttpResponse().statusCode(); + if (this.handleFailure(++failureCount, response, null)) { + // destroy and then return to abort consumption + this.destroy(); + return; + + } else { + // let's retry + continue; + } + + } else { + // reset the consecutive failure count + failureCount = 0; + } + + // get the messages from the response + List messages = response.messages(); + for (Message message : messages) { + // enqueue the next message for processing -- this call may wait + // for enough room in the queue for the messages to be enqueued + this.enqueueMessages(processor, message); + } + + } catch (SdkException e) { + 
System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + failureCount++; + + } } + }); - } else { - // reset the consecutive failure count - failureCount = 0; - } + // start the thread + this.consumptionThread.start(); + } + + /** + * Extracts the message body from the specified {@link Message}. + * + * @param message The {@link Message} from which to extract the message body. + */ + @Override + protected String extractMessageBody(Message message) { + return message.body(); + } - // get the messages from the response - List messages = response.messages(); - for (Message message : messages) { - // enqueue the next message for processing -- this call may wait - // for enough room in the queue for the messages to be enqueued - this.enqueueMessages(processor, message); - } + /** + * Disposes the specified {@link Message}. + * + * @param message The {@link Message} from which to extract the message body. + */ + @Override + protected void disposeMessage(Message message) { + String receiptHandle = message.receiptHandle(); - } catch (SdkException e) { - e.printStackTrace(); - failureCount++; + DeleteMessageRequest deleteMessageRequest = DeleteMessageRequest.builder().queueUrl(this.getSqsUrl()) + .receiptHandle(receiptHandle).build(); + sqsClient.deleteMessage(deleteMessageRequest); + } + + /** + * {@inheritDoc} + */ + @Override + protected void doDestroy() { + // join to the consumption thread + try { + this.consumptionThread.join(); + synchronized (this) { + this.consumptionThread = null; + } + } catch (InterruptedException ignore) { + // ignore } - } - }); - - // start the thread - this.consumptionThread.start(); - } - - /** - * Extracts the message body from the specified {@link Message}. - * - * @param message The {@link Message} from which to extract the - * message body. - */ - @Override - protected String extractMessageBody(Message message) { - return message.body(); - } - - /** - * Disposes the specified {@link Message}. 
- * - * @param message The {@link Message} from which to extract the - * message body. - */ - @Override - protected void disposeMessage(Message message) { - String receiptHandle = message.receiptHandle(); - - DeleteMessageRequest deleteMessageRequest = DeleteMessageRequest.builder() - .queueUrl(this.getSqsUrl()) - .receiptHandle(receiptHandle) - .build(); - - sqsClient.deleteMessage(deleteMessageRequest); - } - - /** - * {@inheritDoc} - */ - @Override - protected void doDestroy() { - // join to the consumption thread - try { - this.consumptionThread.join(); - synchronized (this) { - this.consumptionThread = null; - } - } catch (InterruptedException ignore) { - // ignore } - } } diff --git a/src/main/java/com/senzing/listener/service/AbstractListenerService.java b/src/main/java/com/senzing/listener/service/AbstractListenerService.java index ff9d7fa..db8eb65 100644 --- a/src/main/java/com/senzing/listener/service/AbstractListenerService.java +++ b/src/main/java/com/senzing/listener/service/AbstractListenerService.java @@ -27,859 +27,806 @@ * Provides a base class for {@link ListenerService} implementations. */ public abstract class AbstractListenerService implements ListenerService { - /** - * The initialization parameter used by the default implementation of - * {@link #initSchedulingService(JsonObject)} to specify the Java class name - * of the {@link SchedulingService} to use. If the default implementation of - * {@link #initSchedulingService(JsonObject)} is overridden, then this key - * may have no effect in the derived implementation. - */ - public static final String SCHEDULING_SERVICE_CLASS_KEY = "schedulingService"; - - /** - * The default class name for the {@link #SCHEDULING_SERVICE_CLASS_KEY} - * initialization parameter if none is specified. 
The - */ - public static final String DEFAULT_SCHEDULING_SERVICE_CLASS_NAME = SQLiteSchedulingService.class.getName(); - - /** - * The initialization parameter referencing a JSON object or {@link String} - * that represents the configuration for the {@link SchedulingService} - * instance created by the default implementation of {@link - * #initSchedulingService(JsonObject)} using the {@link - * #SCHEDULING_SERVICE_CLASS_KEY} init parameter. If the default - * implementation of {@link #initSchedulingService(JsonObject)} is overridden, - * then this key may have no effect in the derived implementation. - */ - public static final String SCHEDULING_SERVICE_CONFIG_KEY = "schedulingServiceConfig"; - - /** - * Enumerates the various parts of the info message that can be scheduled - * as actions when parsed. These are used as keys in a {@link Map} provided - * by the sub-class to map the message part to a scheduled action upon - * construction. Any message part that is excluded from the provided map on - * construction will not have any associated scheduled tasks in the default - * implementation of the task-scheduling functions, though they can be - * overridden. - */ - public enum MessagePart { - /** - * The record part of the message. - */ - RECORD, - - /** - * The part of the message associated with each affected entity - * (i.e.: one per affected entity). - */ - AFFECTED_ENTITY, - - /** - * The part of the message associated with each interesting entity - * (i.e.: one per interesting entity). - */ - INTERESTING_ENTITY, - - /** - * The part of the message associated with each notice (i.e.: one - * per notice). - */ - NOTICE; - } - - /** - * The task parameter key for the entity ID. - */ - public static final String ENTITY_ID_PARAMETER_KEY = "ENTITY_ID"; - - /** - * The task parameter key for the record ID. - */ - public static final String RECORD_ID_PARAMETER_KEY = "RECORD_ID"; - - /** - * The task parameter key for the data source. 
- */ - public static final String DATA_SOURCE_PARAMETER_KEY = "DATA_SOURCE"; - - /** - * The task parameter key for the interesting entity degrees of separation. - */ - public static final String DEGREES_PARAMETER_KEY = "DEGREES"; - - /** - * The task parameter key for the interesting entity flags. - */ - public static final String FLAGS_PARAMETER_KEY = "FLAGS"; - - /** - * The task parameter key for the interesting entity sample records. - */ - public static final String SAMPLE_RECORDS_PARAMETER_KEY = "SAMPLE_RECORDS"; - - /** - * The task parameter key for the code parameter of notices. - */ - public static final String CODE_PARAMETER_KEY = "CODE"; - - /** - * The task parameter for the description parameter of notices. - */ - public static final String DESCRIPTION_PARAMETER_KEY = "DESCRIPTION"; - - /** - * The resource key for locking a record. - */ - public static final String RECORD_RESOURCE_KEY = "RECORD"; - - /** - * The resource key for locking an entity. - */ - public static final String ENTITY_RESOURCE_KEY = "ENTITY"; - - /** - * A {@link TaskHandler} implementation that simply delegates to - * {@link AbstractListenerService#handleTask(String, Map, int, Scheduler)}. - */ - protected class ListenerTaskHandler implements TaskHandler { - /** - * Default constructor. + /** + * The initialization parameter used by the default implementation of + * {@link #initSchedulingService(JsonObject)} to specify the Java class name of + * the {@link SchedulingService} to use. If the default implementation of + * {@link #initSchedulingService(JsonObject)} is overridden, then this key may + * have no effect in the derived implementation. + */ + public static final String SCHEDULING_SERVICE_CLASS_KEY = "schedulingService"; + + /** + * The default class name for the {@link #SCHEDULING_SERVICE_CLASS_KEY} + * initialization parameter if none is specified. 
The + */ + public static final String DEFAULT_SCHEDULING_SERVICE_CLASS_NAME = SQLiteSchedulingService.class.getName(); + + /** + * The initialization parameter referencing a JSON object or {@link String} that + * represents the configuration for the {@link SchedulingService} instance + * created by the default implementation of + * {@link #initSchedulingService(JsonObject)} using the + * {@link #SCHEDULING_SERVICE_CLASS_KEY} init parameter. If the default + * implementation of {@link #initSchedulingService(JsonObject)} is overridden, + * then this key may have no effect in the derived implementation. + */ + public static final String SCHEDULING_SERVICE_CONFIG_KEY = "schedulingServiceConfig"; + + /** + * Enumerates the various parts of the info message that can be scheduled as + * actions when parsed. These are used as keys in a {@link Map} provided by the + * sub-class to map the message part to a scheduled action upon construction. + * Any message part that is excluded from the provided map on construction will + * not have any associated scheduled tasks in the default implementation of the + * task-scheduling functions, though they can be overridden. */ - public ListenerTaskHandler() { - // do nothing + public enum MessagePart { + /** + * The record part of the message. + */ + RECORD, + + /** + * The part of the message associated with each affected entity (i.e.: + * one per affected entity). + */ + AFFECTED_ENTITY, + + /** + * The part of the message associated with each interesting entity (i.e.: + * one per interesting entity). + */ + INTERESTING_ENTITY, + + /** + * The part of the message associated with each notice (i.e.: one per + * notice). + */ + NOTICE; } /** - * Overridden to call {@link AbstractListenerService#waitUntilReady(long)} - * on the parent object. - *

    - * {@inheritDoc} + * The task parameter key for the entity ID. */ - @Override - public Boolean waitUntilReady(long timeoutMillis) - throws InterruptedException { - return AbstractListenerService.this.waitUntilReady(timeoutMillis); + public static final String ENTITY_ID_PARAMETER_KEY = "ENTITY_ID"; + + /** + * The task parameter key for the record ID. + */ + public static final String RECORD_ID_PARAMETER_KEY = "RECORD_ID"; + + /** + * The task parameter key for the data source. + */ + public static final String DATA_SOURCE_PARAMETER_KEY = "DATA_SOURCE"; + + /** + * The task parameter key for the interesting entity degrees of separation. + */ + public static final String DEGREES_PARAMETER_KEY = "DEGREES"; + + /** + * The task parameter key for the interesting entity flags. + */ + public static final String FLAGS_PARAMETER_KEY = "FLAGS"; + + /** + * The task parameter key for the interesting entity sample records. + */ + public static final String SAMPLE_RECORDS_PARAMETER_KEY = "SAMPLE_RECORDS"; + + /** + * The task parameter key for the code parameter of notices. + */ + public static final String CODE_PARAMETER_KEY = "CODE"; + + /** + * The task parameter for the description parameter of notices. + */ + public static final String DESCRIPTION_PARAMETER_KEY = "DESCRIPTION"; + + /** + * The resource key for locking a record. + */ + public static final String RECORD_RESOURCE_KEY = "RECORD"; + + /** + * The resource key for locking an entity. + */ + public static final String ENTITY_RESOURCE_KEY = "ENTITY"; + + /** + * A {@link TaskHandler} implementation that simply delegates to + * {@link AbstractListenerService#handleTask(String, Map, int, Scheduler)}. + */ + protected class ListenerTaskHandler implements TaskHandler { + /** + * Default constructor. + */ + public ListenerTaskHandler() { + // do nothing + } + + /** + * Overridden to call {@link AbstractListenerService#waitUntilReady(long)} on + * the parent object. + *

    + * {@inheritDoc} + */ + @Override + public Boolean waitUntilReady(long timeoutMillis) throws InterruptedException { + return AbstractListenerService.this.waitUntilReady(timeoutMillis); + } + + /** + * Overridden to call + * {@link AbstractListenerService#handleTask(String, Map, int, Scheduler)} on + * the parent object. + *

    + * {@inheritDoc} + */ + @Override + public void handleTask(String action, Map parameters, int multiplicity, Scheduler followUpScheduler) throws ServiceExecutionException { + AbstractListenerService.this.handleTask(action, parameters, multiplicity, followUpScheduler); + } + } + + /** + * The {@link Map} of {@link MessagePart} keys to {@link String} values. + */ + private Map messagePartMap = null; + + /** + * The {@link State} of this instance. + */ + private State state = UNINITIALIZED; + + /** + * The backing {@link SchedulingService}. + */ + private SchedulingService schedulingService; + + /** + * The {@link TaskHandler} for this instance. + */ + private TaskHandler taskHandler = null; + + /** + * Constructs with the {@link Map} that maps {@link MessagePart} keys to + * {@link String} action names. + * + * @param messagePartMap The {@link Map} that maps {@link MessagePart}'s to + * {@link String} action keys. + */ + protected AbstractListenerService(Map messagePartMap) { + this.messagePartMap = new LinkedHashMap<>(); + this.messagePartMap.putAll(messagePartMap); + } + + /** + * Gets the {@link State} of this instance. + * + * @return The {@link State} of this instance. + */ + public synchronized State getState() { + return this.state; } /** - * Overridden to call {@link - * AbstractListenerService#handleTask(String, Map, int, Scheduler)} on the - * parent object. - *

    + * Implemented to return the statistics associated with this instance. + * * {@inheritDoc} */ @Override - public void handleTask(String action, - Map parameters, - int multiplicity, - Scheduler followUpScheduler) - throws ServiceExecutionException { - AbstractListenerService.this.handleTask( - action, parameters, multiplicity, followUpScheduler); + public synchronized Map getStatistics() { + return this.schedulingService.getStatistics(); } - } - - /** - * The {@link Map} of {@link MessagePart} keys to {@link String} values. - */ - private Map messagePartMap = null; - - /** - * The {@link State} of this instance. - */ - private State state = UNINITIALIZED; - - /** - * The backing {@link SchedulingService}. - */ - private SchedulingService schedulingService; - - /** - * The {@link TaskHandler} for this instance. - */ - private TaskHandler taskHandler = null; - - /** - * Constructs with the {@link Map} that maps {@link MessagePart} keys to - * {@link String} action names. - * - * @param messagePartMap The {@link Map} that maps {@link MessagePart}'s to - * {@link String} action keys. - */ - protected AbstractListenerService(Map messagePartMap) { - this.messagePartMap = new LinkedHashMap<>(); - this.messagePartMap.putAll(messagePartMap); - } - - /** - * Gets the {@link State} of this instance. - * - * @return The {@link State} of this instance. - */ - public synchronized State getState() { - return this.state; - } - - /** - * Implemented to return the statistics associated with this instance. - * - * {@inheritDoc} - */ - @Override - public synchronized Map getStatistics() { - return this.schedulingService.getStatistics(); - } - - /** - * Provides a means to set the {@link State} for this instance as a - * synchronized method that will notify all upon changing the state. - * - * @param state The {@link State} for this instance. 
- */ - protected synchronized void setState(State state) { - Objects.requireNonNull(state, "State cannot be null"); - this.state = state; - this.notifyAll(); - } - - /** - * This returns the {@link TaskHandler} that was given to the backing {@link - * SchedulingService} for handling tasks during initialization. If this is - * called prior to initialization then this returns null. - * - * @return The {@link TaskHandler} that was obtained via {@link - * #initTaskHandler(JsonObject)} during initialization, or - * null if this instance has not yet been initialized. - */ - protected TaskHandler getTaskHandler() { - return this.taskHandler; - } - - /** - * Creates the {@link TaskHandler} to use with the backing {@link - * SchedulingService} for handling tasks. This is called from {@link - * #init(JsonObject)}. By default this returns a new instance of {@link - * ListenerTaskHandler}. - * - * @param config The {@link JsonObject} describing the initialization config. - * @return The {@link TaskHandler} that was created / initialized. - * @throws ServiceExecutionException If a failure occurs in creating the - * {@link TaskHandler}. - */ - protected TaskHandler initTaskHandler(JsonObject config) - throws ServiceExecutionException { - return new ListenerTaskHandler(); - } - - /** - * Checks if this instance is ready to handle tasks and waits for - * it to be ready for the specified maximum number of milliseconds. Specify - * a negative number of milliseconds to wait indefinitely or zero (0) to - * simply check if ready with no waiting. This is used so the {@link - * SchedulingService} can delay handling tasks until ready. - * - * @param timeoutMillis The maximum number of milliseconds to wait for this - * task handler to become ready, a negative number to - * wait indefinitely, or zero (0) to simply poll without - * waiting. 
- * - * @return {@link Boolean#TRUE} if ready to handle tasks, {@link - * Boolean#FALSE} if not yet ready, and null if due to - * some failure we will never be ready to handle tasks. - * - * @throws InterruptedException If interrupted while waiting. - */ - protected synchronized Boolean waitUntilReady(long timeoutMillis) - throws InterruptedException { - switch (this.getState()) { - case AVAILABLE: - return Boolean.TRUE; - case DESTROYING: - case DESTROYED: - return null; - default: - if (timeoutMillis < 0L) { - this.wait(); - } else if (timeoutMillis > 0L) { - this.wait(timeoutMillis); - } - return (this.getState() == State.AVAILABLE) ? TRUE : FALSE; + + /** + * Provides a means to set the {@link State} for this instance as a synchronized + * method that will notify all upon changing the state. + * + * @param state The {@link State} for this instance. + */ + protected synchronized void setState(State state) { + Objects.requireNonNull(state, "State cannot be null"); + this.state = state; + this.notifyAll(); + } + + /** + * This returns the {@link TaskHandler} that was given to the backing + * {@link SchedulingService} for handling tasks during initialization. If this + * is called prior to initialization then this returns null. + * + * @return The {@link TaskHandler} that was obtained via + * {@link #initTaskHandler(JsonObject)} during initialization, or + * null if this instance has not yet been initialized. + */ + protected TaskHandler getTaskHandler() { + return this.taskHandler; + } + + /** + * Creates the {@link TaskHandler} to use with the backing + * {@link SchedulingService} for handling tasks. This is called from + * {@link #init(JsonObject)}. By default this returns a new instance of + * {@link ListenerTaskHandler}. + * + * @param config The {@link JsonObject} describing the initialization config. + * @return The {@link TaskHandler} that was created / initialized. 
+ * @throws ServiceExecutionException If a failure occurs in creating the + * {@link TaskHandler}. + */ + protected TaskHandler initTaskHandler(JsonObject config) throws ServiceExecutionException { + return new ListenerTaskHandler(); } - } - - /** - * Called to handle the specified {@link Task} with an optional {@link - * Scheduler} for scheduling follow-up tasks if that is allowed. - * Additionally, a multiplicity is specified which, if greater than one (1), - * may require that the task be handled in a different way depending on the - * {@linkplain Task#getAction() action} associated with the {@link Task}. - * Typically, follow-up tasks may not be allowed if the specified - * {@link Task} is itself a follow-up {@link Task}. - * - * @param action The action from the {@link Task} to be handled. - * @param parameters The {@link Map} of parameters to use with the action - * to - * be taken. - * @param multiplicity The number of times an identical task was scheduled. - * @param followUpScheduler The {@link Scheduler} for scheduling follow-up - * tasks, or null if follow-up tasks - * cannot be scheduled. - * @throws ServiceExecutionException If a failure occurred in handling the - * task. - */ - protected abstract void handleTask(String action, - Map parameters, - int multiplicity, - Scheduler followUpScheduler) - throws ServiceExecutionException; - - /** - * Default implementation of {@link ListenerService#init(JsonObject)} - * that initializes with the defined parameter. - * - * @param config THe {@link JsonObject} describing the configuration. - * - * @throws ServiceSetupException If a failure occurs. 
- */ - @Override - public void init(JsonObject config) throws ServiceSetupException { - synchronized (this) { - if (this.getState() != UNINITIALIZED) { - throw new IllegalStateException( - "Cannot initialize if not in the " + UNINITIALIZED + " state: " - + this.getState()); - } - this.setState(INITIALIZING); + + /** + * Checks if this instance is ready to handle tasks and waits for it to be ready + * for the specified maximum number of milliseconds. Specify a negative number + * of milliseconds to wait indefinitely or zero (0) to simply check if ready + * with no waiting. This is used so the {@link SchedulingService} can delay + * handling tasks until ready. + * + * @param timeoutMillis The maximum number of milliseconds to wait for this task + * handler to become ready, a negative number to wait + * indefinitely, or zero (0) to simply poll without + * waiting. + * + * @return {@link Boolean#TRUE} if ready to handle tasks, {@link Boolean#FALSE} + * if not yet ready, and null if due to some failure we + * will never be ready to handle tasks. + * + * @throws InterruptedException If interrupted while waiting. + */ + protected synchronized Boolean waitUntilReady(long timeoutMillis) throws InterruptedException { + switch (this.getState()) { + case AVAILABLE: + return Boolean.TRUE; + case DESTROYING: + case DESTROYED: + return null; + default: + if (timeoutMillis < 0L) { + this.wait(); + } else if (timeoutMillis > 0L) { + this.wait(timeoutMillis); + } + return (this.getState() == State.AVAILABLE) ? TRUE : FALSE; + } } - try { - synchronized (this) { - // default to an empty JSON object if null - if (config == null) { - config = Json.createObjectBuilder().build(); + /** + * Called to handle the specified {@link Task} with an optional + * {@link Scheduler} for scheduling follow-up tasks if that is allowed. 
+ * Additionally, a multiplicity is specified which, if greater than one (1), may + * require that the task be handled in a different way depending on the + * {@linkplain Task#getAction() action} associated with the {@link Task}. + * Typically, follow-up tasks may not be allowed if the specified {@link Task} + * is itself a follow-up {@link Task}. + * + * @param action The action from the {@link Task} to be handled. + * @param parameters The {@link Map} of parameters to use with the action + * to be taken. + * @param multiplicity The number of times an identical task was scheduled. + * @param followUpScheduler The {@link Scheduler} for scheduling follow-up + * tasks, or null if follow-up tasks + * cannot be scheduled. + * @throws ServiceExecutionException If a failure occurred in handling the task. + */ + protected abstract void handleTask(String action, Map parameters, int multiplicity, Scheduler followUpScheduler) throws ServiceExecutionException; + + /** + * Default implementation of {@link ListenerService#init(JsonObject)} that + * initializes with the defined parameter. + * + * @param config THe {@link JsonObject} describing the configuration. + * + * @throws ServiceSetupException If a failure occurs. 
+ */ + @Override + public void init(JsonObject config) throws ServiceSetupException { + synchronized (this) { + if (this.getState() != UNINITIALIZED) { + throw new IllegalStateException( + "Cannot initialize if not in the " + UNINITIALIZED + " state: " + this.getState()); + } + this.setState(INITIALIZING); } - // initializes the task handler - this.taskHandler = this.initTaskHandler(config); + try { + synchronized (this) { + // default to an empty JSON object if null + if (config == null) { + config = Json.createObjectBuilder().build(); + } - // initialize the scheduling service - this.schedulingService = this.initSchedulingService(config); - } + // initializes the task handler + this.taskHandler = this.initTaskHandler(config); - this.doInit(config); + // initialize the scheduling service + this.schedulingService = this.initSchedulingService(config); + } - this.setState(AVAILABLE); + this.doInit(config); - } catch (ServiceSetupException e) { - this.setState(UNINITIALIZED); - throw e; + this.setState(AVAILABLE); - } catch (Exception e) { - this.setState(UNINITIALIZED); - throw new ServiceSetupException(e); + } catch (ServiceSetupException e) { + this.setState(UNINITIALIZED); + throw e; + + } catch (Exception e) { + this.setState(UNINITIALIZED); + throw new ServiceSetupException(e); + } } - } - - /** - * Utility method to convert the task being handled to JSON for logging - * purposes or for serialization. The specified {@link Map} of parameters - * should have values that can be converted to JSON via {@link - * com.senzing.util.JsonUtilities#toJsonObjectBuilder(Map)} - * - * @param action The action for the task. - * @param parameters The {@link Map} of parameters for the task. - * @param multiplicity The multiplicity for the task. - * - * @return The JSON text describing the task. 
- */ - protected String taskAsJson(String action, - Map parameters, - int multiplicity) { - JsonObjectBuilder job = Json.createObjectBuilder(); - job.add("action", action); - job.add("parameters", toJsonObjectBuilder(parameters)); - job.add("multiplicity", multiplicity); - return toJsonText(job.build()); - } - - /** - * Override this to perform whatever other initialization is required. - * - * @param config The {@link JsonObject} describing the initialization - * configuration. - * - * @throws ServiceSetupException If a failure occurs. - */ - protected abstract void doInit(JsonObject config) - throws ServiceSetupException; - - /** - * The default implementation of this method gets the class name from - * the {@link #SCHEDULING_SERVICE_CLASS_KEY} parameter, constructs an instance - * of that class using the default constructor and then initializes the - * constructed {@link SchedulingService} instance using the {@link JsonObject} - * found in the specified configuration via the {@link - * #SCHEDULING_SERVICE_CONFIG_KEY} JSON property. - * #SCHEDULING_SERVICE_CONFIG_KEY} JSON property. - * - * @param config The {@link JsonObject} describing the configuration for this - * instance of scheduling service. - * - * @return The {@link SchedulingService} that was created and initialized. - * @throws ServiceSetupException If an initialization failure occurs. 
- */ - protected SchedulingService initSchedulingService(JsonObject config) - throws ServiceSetupException { - try { - String className = getConfigString( - config, - SCHEDULING_SERVICE_CLASS_KEY, - this.getDefaultSchedulingServiceClassName()); - - // get the scheduling service Class object from the class name - Class schedServiceClass = Class.forName(className); - - if (!SchedulingService.class.isAssignableFrom(schedServiceClass)) { - throw new ServiceSetupException( - "The configured scheduling service class for the " - + SCHEDULING_SERVICE_CLASS_KEY + " config parameter must " - + "implement " + SchedulingService.class.getName()); - } - - // create an instance of the SchedulingService class - SchedulingService schedulingService = (SchedulingService) schedServiceClass.getConstructor().newInstance(); - - // get the scheduling service configuration - JsonObject schedServiceConfig = config.containsKey(SCHEDULING_SERVICE_CONFIG_KEY) - ? getJsonObject(config, SCHEDULING_SERVICE_CONFIG_KEY) - : this.getDefaultSchedulingServiceConfig(); - - // initialize the scheduling service - schedulingService.init(schedServiceConfig, this.getTaskHandler()); - - // return the scheduling service - return schedulingService; - - } catch (ServiceSetupException e) { - throw e; - - } catch (Exception e) { - throw new ServiceSetupException( - "Failed to initialize SchedulingService for ListenerService", e); + + /** + * Utility method to convert the task being handled to JSON for logging purposes + * or for serialization. The specified {@link Map} of parameters should have + * values that can be converted to JSON via + * {@link com.senzing.util.JsonUtilities#toJsonObjectBuilder(Map)} + * + * @param action The action for the task. + * @param parameters The {@link Map} of parameters for the task. + * @param multiplicity The multiplicity for the task. + * + * @return The JSON text describing the task. 
+ */ + protected String taskAsJson(String action, Map parameters, int multiplicity) { + JsonObjectBuilder job = Json.createObjectBuilder(); + job.add("action", action); + job.add("parameters", toJsonObjectBuilder(parameters)); + job.add("multiplicity", multiplicity); + return toJsonText(job.build()); } - } - - /** - * Gets the default {@link SchedulingService} class name with which to - * initialize the backing {@link SchedulingService} if one is not specified - * in the initialization configuration via the {@link - * #SCHEDULING_SERVICE_CLASS_KEY} initialization parameter. By default, this - * returns the {@link #DEFAULT_SCHEDULING_SERVICE_CLASS_NAME}, but it may be - * overridden to return something more sensible for a derived implementation. - * - * @return The default {@link SchedulingService} class name with which to - * initialize. - * - * @see #initSchedulingService(JsonObject) - * @see #getDefaultSchedulingServiceConfig() - * @see #SCHEDULING_SERVICE_CLASS_KEY - * @see #SCHEDULING_SERVICE_CONFIG_KEY - * @see #DEFAULT_SCHEDULING_SERVICE_CLASS_NAME - * - */ - public String getDefaultSchedulingServiceClassName() { - return DEFAULT_SCHEDULING_SERVICE_CLASS_NAME; - } - - /** - * Gets the default {@link JsonObject} configuration with which to initialize - * the backing {@link SchedulingService} if one is not specified in the - * initialization configuration via the {@link #SCHEDULING_SERVICE_CONFIG_KEY} - * initialization parameter. By default, this returns the null, - * but it may be overridden to return something more sensible for a derived - * implementation. - * - * @return The default {@link JsonObject} configuration with which to - * initialize the backing {@link SchedulingService}. 
- * - * @see #initSchedulingService(JsonObject) - * @see #getDefaultSchedulingServiceClassName() - * @see #SCHEDULING_SERVICE_CLASS_KEY - * @see #SCHEDULING_SERVICE_CONFIG_KEY - */ - public JsonObject getDefaultSchedulingServiceConfig() { - return null; - } - - /** - * Processes the message described by the specified {@link JsonObject}. - * - * @param message The {@link JsonObject} describing the message. - * - * @throws ServiceExecutionException If a failure occurs. - */ - @Override - public void process(JsonObject message) throws ServiceExecutionException { - try { - // check the state - if (this.getState() != AVAILABLE) { - throw new IllegalStateException( - "Cannot process messages when not in the " + AVAILABLE + " state: " - + state); - } - - // get the scheduler - Scheduler scheduler = this.schedulingService.createScheduler(); - - // get the task group - TaskGroup taskGroup = scheduler.getTaskGroup(); - if (taskGroup == null) { - throw new IllegalStateException("The TaskGroup should not be null"); - } - - // schedule the tasks - this.scheduleTasks(message, scheduler); - - // commit the scheduler tasks - scheduler.commit(); - - // wait for the tasks to be completed - logDebug("AWAITING COMPLETION ON TASK GROUP: " + taskGroup.getTaskCount()); - taskGroup.awaitCompletion(); - logDebug("COMPLETED TASK GROUP: " + taskGroup.getTaskCount()); - - // determine the state of the group - TaskGroup.State groupState = taskGroup.getState(); - logDebug("COMPLETED TASK GROUP STATE: " + groupState); - if (groupState == TaskGroup.State.SUCCESSFUL) { - return; - } - - // if we get here then we had a failure - List failedTasks = taskGroup.getFailedTasks(); - if (failedTasks.size() == 1) { - Exception failure = failedTasks.get(0).getFailure(); - if (failure instanceof ServiceExecutionException) { - throw ((ServiceExecutionException) failure); - } else { - throw new ServiceExecutionException(failure); - } - } else { - StringWriter sw = new StringWriter(); - PrintWriter pw = new 
PrintWriter(sw); - - for (Task failedTask : failedTasks) { - Exception failure = failedTask.getFailure(); - pw.println("----------------------------------------"); - pw.println(failedTask); - failure.printStackTrace(pw); - pw.println(); + + /** + * Override this to perform whatever other initialization is required. + * + * @param config The {@link JsonObject} describing the initialization + * configuration. + * + * @throws ServiceSetupException If a failure occurs. + */ + protected abstract void doInit(JsonObject config) throws ServiceSetupException; + + /** + * The default implementation of this method gets the class name from the + * {@link #SCHEDULING_SERVICE_CLASS_KEY} parameter, constructs an instance of + * that class using the default constructor and then initializes the constructed + * {@link SchedulingService} instance using the {@link JsonObject} found in the + * specified configuration via the {@link #SCHEDULING_SERVICE_CONFIG_KEY} JSON + * property. #SCHEDULING_SERVICE_CONFIG_KEY} JSON property. + * + * @param config The {@link JsonObject} describing the configuration for this + * instance of scheduling service. + * + * @return The {@link SchedulingService} that was created and initialized. + * @throws ServiceSetupException If an initialization failure occurs. 
+ */ + protected SchedulingService initSchedulingService(JsonObject config) throws ServiceSetupException { + try { + String className = getConfigString(config, SCHEDULING_SERVICE_CLASS_KEY, + this.getDefaultSchedulingServiceClassName()); + + // get the scheduling service Class object from the class name + Class schedServiceClass = Class.forName(className); + + if (!SchedulingService.class.isAssignableFrom(schedServiceClass)) { + throw new ServiceSetupException( + "The configured scheduling service class for the " + SCHEDULING_SERVICE_CLASS_KEY + + " config parameter must " + "implement " + SchedulingService.class.getName()); + } + + // create an instance of the SchedulingService class + SchedulingService schedulingService = (SchedulingService) schedServiceClass.getConstructor().newInstance(); + + // get the scheduling service configuration + JsonObject schedServiceConfig = config.containsKey(SCHEDULING_SERVICE_CONFIG_KEY) + ? getJsonObject(config, SCHEDULING_SERVICE_CONFIG_KEY) + : this.getDefaultSchedulingServiceConfig(); + + // initialize the scheduling service + schedulingService.init(schedServiceConfig, this.getTaskHandler()); + + // return the scheduling service + return schedulingService; + + } catch (ServiceSetupException e) { + throw e; + + } catch (Exception e) { + throw new ServiceSetupException("Failed to initialize SchedulingService for ListenerService", e); } - throw new ServiceExecutionException(pw.toString()); - } - } catch (ServiceExecutionException e) { - throw e; - - } catch (RuntimeException e) { - e.printStackTrace(); - throw new ServiceExecutionException(e); } - } - - /** - * Schedules the tasks for the specified message using the specified - * {@link Scheduler}. - * - * @param message The {@link JsonObject} for the message. - * @param scheduler The {@link Scheduler} to use for the tasks. - * @throws ServiceExecutionException If a failure occurs. 
- */ - protected void scheduleTasks(JsonObject message, Scheduler scheduler) - throws ServiceExecutionException { - SzInfoMessage infoMessage = SzInfoMessage.fromRawJson(message); - - // handle the record first - String dataSource = getString(message, RAW_DATA_SOURCE_KEY); - String recordId = getString(message, RAW_RECORD_ID_KEY); - this.handleRecord(dataSource, recordId, infoMessage, message, scheduler); - - // now handle the affected entities - JsonArray jsonArray = getJsonArray(message, RAW_AFFECTED_ENTITIES_KEY); - if (jsonArray != null) { - for (JsonObject affected : jsonArray.getValuesAs(JsonObject.class)) { - Long entityId = getLong(affected, RAW_ENTITY_ID_KEY); - this.handleAffected( - entityId, infoMessage, affected, message, scheduler); - } + + /** + * Gets the default {@link SchedulingService} class name with which to + * initialize the backing {@link SchedulingService} if one is not specified in + * the initialization configuration via the + * {@link #SCHEDULING_SERVICE_CLASS_KEY} initialization parameter. By default, + * this returns the {@link #DEFAULT_SCHEDULING_SERVICE_CLASS_NAME}, but it may + * be overridden to return something more sensible for a derived implementation. + * + * @return The default {@link SchedulingService} class name with which to + * initialize. + * + * @see #initSchedulingService(JsonObject) + * @see #getDefaultSchedulingServiceConfig() + * @see #SCHEDULING_SERVICE_CLASS_KEY + * @see #SCHEDULING_SERVICE_CONFIG_KEY + * @see #DEFAULT_SCHEDULING_SERVICE_CLASS_NAME + * + */ + public String getDefaultSchedulingServiceClassName() { + return DEFAULT_SCHEDULING_SERVICE_CLASS_NAME; + } + + /** + * Gets the default {@link JsonObject} configuration with which to initialize + * the backing {@link SchedulingService} if one is not specified in the + * initialization configuration via the {@link #SCHEDULING_SERVICE_CONFIG_KEY} + * initialization parameter. 
By default, this returns the null, but + * it may be overridden to return something more sensible for a derived + * implementation. + * + * @return The default {@link JsonObject} configuration with which to initialize + * the backing {@link SchedulingService}. + * + * @see #initSchedulingService(JsonObject) + * @see #getDefaultSchedulingServiceClassName() + * @see #SCHEDULING_SERVICE_CLASS_KEY + * @see #SCHEDULING_SERVICE_CONFIG_KEY + */ + public JsonObject getDefaultSchedulingServiceConfig() { + return null; + } + + /** + * Processes the message described by the specified {@link JsonObject}. + * + * @param message The {@link JsonObject} describing the message. + * + * @throws ServiceExecutionException If a failure occurs. + */ + @Override + public void process(JsonObject message) throws ServiceExecutionException { + try { + // check the state + if (this.getState() != AVAILABLE) { + throw new IllegalStateException( + "Cannot process messages when not in the " + AVAILABLE + " state: " + state); + } + + // get the scheduler + Scheduler scheduler = this.schedulingService.createScheduler(); + + // get the task group + TaskGroup taskGroup = scheduler.getTaskGroup(); + if (taskGroup == null) { + throw new IllegalStateException("The TaskGroup should not be null"); + } + + // schedule the tasks + this.scheduleTasks(message, scheduler); + + // commit the scheduler tasks + scheduler.commit(); + + // wait for the tasks to be completed + logDebug("AWAITING COMPLETION ON TASK GROUP: " + taskGroup.getTaskCount()); + taskGroup.awaitCompletion(); + logDebug("COMPLETED TASK GROUP: " + taskGroup.getTaskCount()); + + // determine the state of the group + TaskGroup.State groupState = taskGroup.getState(); + logDebug("COMPLETED TASK GROUP STATE: " + groupState); + if (groupState == TaskGroup.State.SUCCESSFUL) { + return; + } + + // if we get here then we had a failure + List failedTasks = taskGroup.getFailedTasks(); + if (failedTasks.size() == 1) { + Exception failure = 
failedTasks.get(0).getFailure(); + if (failure instanceof ServiceExecutionException) { + throw ((ServiceExecutionException) failure); + } else { + throw new ServiceExecutionException(failure); + } + } else { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + + for (Task failedTask : failedTasks) { + Exception failure = failedTask.getFailure(); + pw.println("----------------------------------------"); + pw.println(failedTask); + System.err.println(failure.getMessage()); + System.err.println(formatStackTrace(failure.getStackTrace())); + pw.println(); + } + throw new ServiceExecutionException(pw.toString()); + } + } catch (ServiceExecutionException e) { + throw e; + + } catch (RuntimeException e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + throw new ServiceExecutionException(e); + } } - // now handle the interesting entities - JsonObject jsonObj = getJsonObject(message, RAW_INTERESTING_ENTITIES_KEY); - if (jsonObj != null) { - jsonArray = getJsonArray(jsonObj, RAW_ENTITIES_KEY); - if (jsonArray != null) { - Iterator iter = infoMessage.getInterestingEntities().iterator(); - for (JsonObject interesting : jsonArray.getValuesAs(JsonObject.class)) { - SzInterestingEntity next = iter.next(); - this.handleInteresting( - next, infoMessage, interesting, message, scheduler); + /** + * Schedules the tasks for the specified message using the specified + * {@link Scheduler}. + * + * @param message The {@link JsonObject} for the message. + * @param scheduler The {@link Scheduler} to use for the tasks. + * @throws ServiceExecutionException If a failure occurs. 
+ */ + protected void scheduleTasks(JsonObject message, Scheduler scheduler) throws ServiceExecutionException { + SzInfoMessage infoMessage = SzInfoMessage.fromRawJson(message); + + // handle the record first + String dataSource = getString(message, RAW_DATA_SOURCE_KEY); + String recordId = getString(message, RAW_RECORD_ID_KEY); + this.handleRecord(dataSource, recordId, infoMessage, message, scheduler); + + // now handle the affected entities + JsonArray jsonArray = getJsonArray(message, RAW_AFFECTED_ENTITIES_KEY); + if (jsonArray != null) { + for (JsonObject affected : jsonArray.getValuesAs(JsonObject.class)) { + Long entityId = getLong(affected, RAW_ENTITY_ID_KEY); + this.handleAffected(entityId, infoMessage, affected, message, scheduler); + } } - } - - // handle the notices - jsonArray = getJsonArray(message, "NOTICES"); - if (jsonArray != null) { - Iterator noticeIter = infoMessage.getNotices().iterator(); - for (JsonObject notice : jsonArray.getValuesAs(JsonObject.class)) { - SzNotice next = noticeIter.next(); - this.handleNotice(next, infoMessage, notice, message, scheduler); + + // now handle the interesting entities + JsonObject jsonObj = getJsonObject(message, RAW_INTERESTING_ENTITIES_KEY); + if (jsonObj != null) { + jsonArray = getJsonArray(jsonObj, RAW_ENTITIES_KEY); + if (jsonArray != null) { + Iterator iter = infoMessage.getInterestingEntities().iterator(); + for (JsonObject interesting : jsonArray.getValuesAs(JsonObject.class)) { + SzInterestingEntity next = iter.next(); + this.handleInteresting(next, infoMessage, interesting, message, scheduler); + } + } + + // handle the notices + jsonArray = getJsonArray(message, "NOTICES"); + if (jsonArray != null) { + Iterator noticeIter = infoMessage.getNotices().iterator(); + for (JsonObject notice : jsonArray.getValuesAs(JsonObject.class)) { + SzNotice next = noticeIter.next(); + this.handleNotice(next, infoMessage, notice, message, scheduler); + } + } } - } } - } - - /** - * Returns the {@link String} action 
identifier for the specified - * {@link MessagePart}. The default implementation of this uses the - * {@link Map} with which this instance was constructed. - * - * @param messagePart The {@link MessagePart} for which the action is being - * requested. - * @return The associated action or null if no action should - * be mapped to the {@link MessagePart}. - */ - protected String getActionForMessagePart(MessagePart messagePart) { - return this.messagePartMap.get(messagePart); - } - - /** - * This method is called for the data source code and record ID found in the - * root of the INFO message. If {@link - * #getActionForMessagePart(MessagePart)} returns null for - * {@link MessagePart#RECORD} then this method does nothing (but - * may be overridden), otherwise if there is an associated task for the - * {@link MessagePart#RECORD}, then a new {@link Task} is scheduled using the - * specified {@link Scheduler} with the associated action key and the - * following parameters and required resources: - *

      - *
    • Parameter: {@link #DATA_SOURCE_PARAMETER_KEY} (string)
    • - *
    • Parameter: {@link #RECORD_ID_PARAMETER_KEY} (string)
    • - *
    • Resource: {@link #RECORD_RESOURCE_KEY} (for the associated record)
    • - *
    - * - * @param dataSource The {@link String} data source code from the info message. - * @param recordId The {@link String} record ID from the info message. - * @param infoMessage The {@link SzInfoMessage} describing the INFO message. - * @param rawMessage The entire INFO message in its raw form. - * @param scheduler The {@link Scheduler} to be used to schedule the task. - */ - protected void handleRecord(String dataSource, - String recordId, - SzInfoMessage infoMessage, - JsonObject rawMessage, - Scheduler scheduler) { - String action = this.messagePartMap.get(MessagePart.RECORD); - if (action == null || action.trim().length() == 0) { - return; + + /** + * Returns the {@link String} action identifier for the specified + * {@link MessagePart}. The default implementation of this uses the {@link Map} + * with which this instance was constructed. + * + * @param messagePart The {@link MessagePart} for which the action is being + * requested. + * @return The associated action or null if no action should be + * mapped to the {@link MessagePart}. + */ + protected String getActionForMessagePart(MessagePart messagePart) { + return this.messagePartMap.get(messagePart); } - scheduler.createTaskBuilder(action) - .parameter(DATA_SOURCE_PARAMETER_KEY, dataSource) - .parameter(RECORD_ID_PARAMETER_KEY, recordId) - .resource(RECORD_RESOURCE_KEY, dataSource, recordId) - .schedule(); - } - - /** - * This method is called for each entity ID in the - * AFFECTED_ENTITIES found in an INFO message. If - * {@link #getActionForMessagePart(MessagePart)} returns null for - * {@link MessagePart#AFFECTED_ENTITY} then this method does nothing (but - * may be overridden), otherwise if there is an associated task for the - * {@link MessagePart#AFFECTED_ENTITY}, then a new {@link Task} is - * scheduled using the specified {@link Scheduler} with the associated - * action key and the following parameters and required resources: - *
      - *
    • Parameter: {@link #ENTITY_ID_PARAMETER_KEY} (long integer)
    • - *
    • Resource: {@link #ENTITY_RESOURCE_KEY} (for the associated entity)
    • - *
    - * - * @param entityId The {@link Long} entity ID identifying the affected - * entity. - * @param infoMessage The {@link SzInfoMessage} describing the INFO message. - * @param rawMessagePart The {@link JsonObject} describing the interesting - * entity in its raw JSON form. - * @param rawMessage The entire INFO message in its raw form. - * @param scheduler The {@link Scheduler} to be used to schedule the task. - */ - protected void handleAffected(long entityId, - SzInfoMessage infoMessage, - JsonObject rawMessagePart, - JsonObject rawMessage, - Scheduler scheduler) { - String action = this.messagePartMap.get(MessagePart.AFFECTED_ENTITY); - if (action == null || action.trim().length() == 0) { - return; + + /** + * This method is called for the data source code and record ID found in the + * root of the INFO message. If {@link #getActionForMessagePart(MessagePart)} + * returns null for {@link MessagePart#RECORD} then this method + * does nothing (but may be overridden), otherwise if there is an associated + * task for the {@link MessagePart#RECORD}, then a new {@link Task} is scheduled + * using the specified {@link Scheduler} with the associated action key and the + * following parameters and required resources: + *
      + *
    • Parameter: {@link #DATA_SOURCE_PARAMETER_KEY} (string)
    • + *
    • Parameter: {@link #RECORD_ID_PARAMETER_KEY} (string)
    • + *
    • Resource: {@link #RECORD_RESOURCE_KEY} (for the associated record)
    • + *
    + * + * @param dataSource The {@link String} data source code from the info message. + * @param recordId The {@link String} record ID from the info message. + * @param infoMessage The {@link SzInfoMessage} describing the INFO message. + * @param rawMessage The entire INFO message in its raw form. + * @param scheduler The {@link Scheduler} to be used to schedule the task. + */ + protected void handleRecord(String dataSource, String recordId, SzInfoMessage infoMessage, JsonObject rawMessage, Scheduler scheduler) { + String action = this.messagePartMap.get(MessagePart.RECORD); + if (action == null || action.trim().length() == 0) { + return; + } + scheduler.createTaskBuilder(action).parameter(DATA_SOURCE_PARAMETER_KEY, dataSource) + .parameter(RECORD_ID_PARAMETER_KEY, recordId).resource(RECORD_RESOURCE_KEY, dataSource, recordId) + .schedule(); } - scheduler.createTaskBuilder(action) - .parameter(ENTITY_ID_PARAMETER_KEY, entityId) - .resource(ENTITY_RESOURCE_KEY, entityId) - .schedule(); - } - - /** - * This method is called for each element in the - * INTERESTING_ENTITIES found in an INFO message. If - * {@link #getActionForMessagePart(MessagePart)} returns null for - * {@link MessagePart#INTERESTING_ENTITY} then this method does nothing (but - * may be overridden), otherwise if there is an associated task for the - * {@link MessagePart#INTERESTING_ENTITY}, then a new {@link Task} is - * scheduled using the specified {@link Scheduler} with the associated - * action key and the following parameters and required resources: - *
      - *
    • Parameter: {@link #ENTITY_ID_PARAMETER_KEY} (long integer)
    • - *
    • Parameter: {@link #DEGREES_PARAMETER_KEY} (integer)
    • - *
    • Parameter: {@link #FLAGS_PARAMETER_KEY} (array of strings)
    • - *
    • Parameter: {@link #SAMPLE_RECORDS_PARAMETER_KEY} (array of objects - * matching the format from {@link SzSampleRecord#toJsonObject()}) - *
    • - *
    • Resource: {@link #ENTITY_RESOURCE_KEY} (for the associated entity)
    • - *
    - * - * @param interestingEntity The {@link SzInterestingEntity} describing the - * interesting entity to handle. - * @param infoMessage The {@link SzInfoMessage} describing the INFO - * message. - * @param rawMessagePart The {@link JsonObject} describing the interesting - * entity in its raw JSON form. - * @param rawMessage The entire INFO message in its raw form. - * @param scheduler The {@link Scheduler} to be used to schedule the - * task. - */ - protected void handleInteresting(SzInterestingEntity interestingEntity, - SzInfoMessage infoMessage, - JsonObject rawMessagePart, - JsonObject rawMessage, - Scheduler scheduler) { - String action = this.messagePartMap.get(MessagePart.INTERESTING_ENTITY); - if (action == null || action.trim().length() == 0) { - return; + + /** + * This method is called for each entity ID in the + * AFFECTED_ENTITIES found in an INFO message. If + * {@link #getActionForMessagePart(MessagePart)} returns null for + * {@link MessagePart#AFFECTED_ENTITY} then this method does nothing (but may be + * overridden), otherwise if there is an associated task for the + * {@link MessagePart#AFFECTED_ENTITY}, then a new {@link Task} is scheduled + * using the specified {@link Scheduler} with the associated action key and the + * following parameters and required resources: + *
      + *
    • Parameter: {@link #ENTITY_ID_PARAMETER_KEY} (long integer)
    • + *
    • Resource: {@link #ENTITY_RESOURCE_KEY} (for the associated entity)
    • + *
    + * + * @param entityId The {@link Long} entity ID identifying the affected + * entity. + * @param infoMessage The {@link SzInfoMessage} describing the INFO message. + * @param rawMessagePart The {@link JsonObject} describing the interesting + * entity in its raw JSON form. + * @param rawMessage The entire INFO message in its raw form. + * @param scheduler The {@link Scheduler} to be used to schedule the task. + */ + protected void handleAffected(long entityId, SzInfoMessage infoMessage, JsonObject rawMessagePart, JsonObject rawMessage, Scheduler scheduler) { + String action = this.messagePartMap.get(MessagePart.AFFECTED_ENTITY); + if (action == null || action.trim().length() == 0) { + return; + } + scheduler.createTaskBuilder(action).parameter(ENTITY_ID_PARAMETER_KEY, entityId) + .resource(ENTITY_RESOURCE_KEY, entityId).schedule(); } - // begin building the task with the basic parameters - TaskBuilder.ListParamBuilder builder = scheduler.createTaskBuilder(action) - .parameter(ENTITY_ID_PARAMETER_KEY, interestingEntity.getEntityId()) - .parameter(DEGREES_PARAMETER_KEY, interestingEntity.getDegrees()) - .listParameter(FLAGS_PARAMETER_KEY); + /** + * This method is called for each element in the + * INTERESTING_ENTITIES found in an INFO message. If + * {@link #getActionForMessagePart(MessagePart)} returns null for + * {@link MessagePart#INTERESTING_ENTITY} then this method does nothing (but may + * be overridden), otherwise if there is an associated task for the + * {@link MessagePart#INTERESTING_ENTITY}, then a new {@link Task} is scheduled + * using the specified {@link Scheduler} with the associated action key and the + * following parameters and required resources: + *
      + *
    • Parameter: {@link #ENTITY_ID_PARAMETER_KEY} (long integer)
    • + *
    • Parameter: {@link #DEGREES_PARAMETER_KEY} (integer)
    • + *
    • Parameter: {@link #FLAGS_PARAMETER_KEY} (array of strings)
    • + *
    • Parameter: {@link #SAMPLE_RECORDS_PARAMETER_KEY} (array of objects + * matching the format from {@link SzSampleRecord#toJsonObject()})
    • + *
    • Resource: {@link #ENTITY_RESOURCE_KEY} (for the associated entity)
    • + *
    + * + * @param interestingEntity The {@link SzInterestingEntity} describing the + * interesting entity to handle. + * @param infoMessage The {@link SzInfoMessage} describing the INFO + * message. + * @param rawMessagePart The {@link JsonObject} describing the interesting + * entity in its raw JSON form. + * @param rawMessage The entire INFO message in its raw form. + * @param scheduler The {@link Scheduler} to be used to schedule the + * task. + */ + protected void handleInteresting(SzInterestingEntity interestingEntity, SzInfoMessage infoMessage, JsonObject rawMessagePart, JsonObject rawMessage, Scheduler scheduler) { + String action = this.messagePartMap.get(MessagePart.INTERESTING_ENTITY); + if (action == null || action.trim().length() == 0) { + return; + } + + // begin building the task with the basic parameters + TaskBuilder.ListParamBuilder builder = scheduler.createTaskBuilder(action) + .parameter(ENTITY_ID_PARAMETER_KEY, interestingEntity.getEntityId()) + .parameter(DEGREES_PARAMETER_KEY, interestingEntity.getDegrees()).listParameter(FLAGS_PARAMETER_KEY); - // add the flags to the list parameter - for (String flag : interestingEntity.getFlags()) { - builder.add(flag); - } + // add the flags to the list parameter + for (String flag : interestingEntity.getFlags()) { + builder.add(flag); + } + + // end the list and start the sample records parameter + builder = builder.endList().listParameter(SAMPLE_RECORDS_PARAMETER_KEY); - // end the list and start the sample records parameter - builder = builder.endList().listParameter(SAMPLE_RECORDS_PARAMETER_KEY); + // add the sample records to the list parameter + for (SzSampleRecord record : interestingEntity.getSampleRecords()) { + builder.add(record.toJsonObject()); + } - // add the sample records to the list parameter - for (SzSampleRecord record : interestingEntity.getSampleRecords()) { - builder.add(record.toJsonObject()); + // end the list, add the required resources and schedule the task + 
builder.endList().resource(ENTITY_RESOURCE_KEY, interestingEntity.getEntityId()).schedule(); } - // end the list, add the required resources and schedule the task - builder.endList() - .resource(ENTITY_RESOURCE_KEY, interestingEntity.getEntityId()) - .schedule(); - } - - /** - * This method is called for each element in the NOTICES array - * found in an INFO message. If {@link #getActionForMessagePart(MessagePart)} - * returns null for {@link MessagePart#NOTICE} then this method - * does nothing (but may be overridden), otherwise if there is an associated - * task for the {@link MessagePart#NOTICE}, then a new {@link Task} is - * scheduled using the specified {@link Scheduler} with the associated - * action key and the following parameters and required resources: - *
      - *
    • Parameter: {@link #CODE_PARAMETER_KEY} (string)
    • - *
    • Parameter: {@link #DESCRIPTION_PARAMETER_KEY} (string)
    • - *
    • Resources: [None]
    • - *
    - * - * @param notice The {@link SzNotice} describing the notice. - * @param infoMessage The {@link SzInfoMessage} describing the INFO message. - * @param rawMessagePart The {@link JsonObject} describing the notice in its - * raw JSON form. - * @param rawMessage The entire INFO message. - * @param scheduler The {@link Scheduler} to be used to schedule the task. - */ - protected void handleNotice(SzNotice notice, - SzInfoMessage infoMessage, - JsonObject rawMessagePart, - JsonObject rawMessage, - Scheduler scheduler) { - String action = this.getActionForMessagePart(MessagePart.NOTICE); - if (action == null || action.trim().length() == 0) { - return; + /** + * This method is called for each element in the NOTICES array + * found in an INFO message. If {@link #getActionForMessagePart(MessagePart)} + * returns null for {@link MessagePart#NOTICE} then this method + * does nothing (but may be overridden), otherwise if there is an associated + * task for the {@link MessagePart#NOTICE}, then a new {@link Task} is scheduled + * using the specified {@link Scheduler} with the associated action key and the + * following parameters and required resources: + *
      + *
    • Parameter: {@link #CODE_PARAMETER_KEY} (string)
    • + *
    • Parameter: {@link #DESCRIPTION_PARAMETER_KEY} (string)
    • + *
    • Resources: [None]
    • + *
    + * + * @param notice The {@link SzNotice} describing the notice. + * @param infoMessage The {@link SzInfoMessage} describing the INFO message. + * @param rawMessagePart The {@link JsonObject} describing the notice in its raw + * JSON form. + * @param rawMessage The entire INFO message. + * @param scheduler The {@link Scheduler} to be used to schedule the task. + */ + protected void handleNotice(SzNotice notice, SzInfoMessage infoMessage, JsonObject rawMessagePart, JsonObject rawMessage, Scheduler scheduler) { + String action = this.getActionForMessagePart(MessagePart.NOTICE); + if (action == null || action.trim().length() == 0) { + return; + } + scheduler.createTaskBuilder(action).parameter(CODE_PARAMETER_KEY, notice.getCode()) + .parameter(DESCRIPTION_PARAMETER_KEY, notice.getDescription()).schedule(); } - scheduler.createTaskBuilder(action) - .parameter(CODE_PARAMETER_KEY, notice.getCode()) - .parameter(DESCRIPTION_PARAMETER_KEY, notice.getDescription()) - .schedule(); - } - - /** - * Implemented as a synchronized method to {@linkplain #setState(State) - * set the state} to {@link com.senzing.listener.communication.MessageConsumer.State#DESTROYING}, call - * {@link #doDestroy()} and - * then perform {@link #notifyAll()} and set the state to {@link - * com.senzing.listener.communication.MessageConsumer.State#DESTROYED}. - */ - public void destroy() { - synchronized (this) { - State state = this.getState(); - if (state == DESTROYED) { - return; - } - - if (state == DESTROYING) { - while (this.getState() != DESTROYED) { - try { - this.wait(1000L); - } catch (InterruptedException e) { - // ignore - } + + /** + * Implemented as a synchronized method to {@linkplain #setState(State) set the + * state} to + * {@link com.senzing.listener.communication.MessageConsumer.State#DESTROYING}, + * call {@link #doDestroy()} and then perform {@link #notifyAll()} and set the + * state to + * {@link com.senzing.listener.communication.MessageConsumer.State#DESTROYED}. 
+ */ + public void destroy() { + synchronized (this) { + State state = this.getState(); + if (state == DESTROYED) { + return; + } + + if (state == DESTROYING) { + while (this.getState() != DESTROYED) { + try { + this.wait(1000L); + } catch (InterruptedException e) { + // ignore + } + } + // once DESTROYED state is found, just return + return; + } + + // begin destruction + this.setState(DESTROYING); } - // once DESTROYED state is found, just return - return; - } - // begin destruction - this.setState(DESTROYING); - } + // destroy the scheduling service + this.schedulingService.destroy(); - // destroy the scheduling service - this.schedulingService.destroy(); + try { + // now complete the destruction / cleanup + this.doDestroy(); - try { - // now complete the destruction / cleanup - this.doDestroy(); + } finally { + this.setState(DESTROYED); // this should notify all as well + } + } - } finally { - this.setState(DESTROYED); // this should notify all as well + /** + * Gets the backing {@link SchedulingService} used by this instance. + * + * @return The backing {@link SchedulingService} used by this instance. + */ + protected SchedulingService getSchedulingService() { + return this.schedulingService; } - } - - /** - * Gets the backing {@link SchedulingService} used by this instance. - * - * @return The backing {@link SchedulingService} used by this instance. - */ - protected SchedulingService getSchedulingService() { - return this.schedulingService; - } - - /** - * This is called from the {@link #destroy()} implementation and should be - * overridden by the concrete sub-class. - */ - protected abstract void doDestroy(); + + /** + * This is called from the {@link #destroy()} implementation and should be + * overridden by the concrete sub-class. 
+ */ + protected abstract void doDestroy(); } diff --git a/src/main/java/com/senzing/listener/service/locking/LockToken.java b/src/main/java/com/senzing/listener/service/locking/LockToken.java index 2b13df3..47b4af6 100644 --- a/src/main/java/com/senzing/listener/service/locking/LockToken.java +++ b/src/main/java/com/senzing/listener/service/locking/LockToken.java @@ -1,5 +1,7 @@ package com.senzing.listener.service.locking; +import static com.senzing.util.LoggingUtilities.formatStackTrace; + import java.io.Serializable; import java.net.InetAddress; import java.net.NetworkInterface; @@ -14,342 +16,332 @@ * Identifies a lock that has been obtained. */ public final class LockToken implements Serializable { - /** - * The pattern for parsing the date values returned from the native API. - */ - private static final String DATE_TIME_PATTERN = "yyyy-MM-dd_HH:mm:ss.SSS"; - - /** - * The time zone used for the time component of the build number. - */ - private static final ZoneId UTC_ZONE = ZoneId.of("UTC"); - - /** - * The {@link DateTimeFormatter} for interpreting the timestamps from the - * native API. - */ - private static final DateTimeFormatter DATE_TIME_FORMATTER - = DateTimeFormatter.ofPattern(DATE_TIME_PATTERN); - - /** - * The next lock token ID. - */ - private static long nextTokenId = 1L; - - /** - * The server key for this this server. - */ - private static final String LOCAL_HOST_KEY = formatHostKey(); - - /** - * The {@link LockScope} for this instance. - */ - private LockScope scope; - - /** - * The key that identifies the process on the server/host on where the lock - * was obtained. - */ - private String processKey; - - /** - * The key that identifies the host on which the lock was obtained. - */ - private String hostKey; - - /** - * The unique sequential numeric ID for this instance token instance. - */ - private long tokenId; - - /** - * The token key for this token which encapsulates the unique ID and the - * timestamp when it was created. 
- */ - private String tokenKey; - - /** - * The timestamp for when this instance was constructed. - */ - private Instant timestamp; - - /** - * Returns the next token ID. - * - * @return The next unique token ID. - */ - private synchronized long getNextTokenId() { - return nextTokenId++; - } - - /** - * Constructs with the specified {@link LockScope}. - * - * @param scope The {@link LockScope} for the token. - */ - public LockToken(LockScope scope) { - Objects.requireNonNull(scope, "The scope cannot be null"); - this.scope = scope; - ProcessHandle procHandle = ProcessHandle.current(); - long processId = procHandle.pid(); - this.processKey = String.valueOf(processId); - ProcessHandle.Info procInfo = procHandle.info(); - Optional startInstant = procInfo.startInstant(); - if (startInstant.isPresent()) { - ZonedDateTime startTime = startInstant.get().atZone(UTC_ZONE); - - this.processKey = this.processKey - + "#" + DATE_TIME_FORMATTER.format(startTime); - } - this.hostKey = LOCAL_HOST_KEY; - - Instant now = Instant.now(); - this.tokenId = getNextTokenId(); - - this.tokenKey = "[" + this.tokenId + "#" + this.scope - + "#" + DATE_TIME_FORMATTER.format(now.atZone(UTC_ZONE)) - + " ] @ [ " + this.processKey - + " ] @ [ " + this.hostKey - + " ]"; - - this.timestamp = now; - } - - /** - * Gets the {@linkS String} representation of the MAC address for the - * specified {@link NetworkInterface}. - * - * @param netInterface The {@link NetworkInterface} for which to get the - * MAC address. - * @return The {@link String} representation of the MAC address. - */ - private static String getMacAddress(NetworkInterface netInterface) - throws SocketException - { - byte[] mac = netInterface.getHardwareAddress(); - StringBuilder sb = new StringBuilder(); - String prefix = ""; - for (byte b : mac) { - sb.append(prefix).append(String.format("%02X", b)); - prefix = "-"; + /** + * The pattern for parsing the date values returned from the native API. 
+ */ + private static final String DATE_TIME_PATTERN = "yyyy-MM-dd_HH:mm:ss.SSS"; + + /** + * The time zone used for the time component of the build number. + */ + private static final ZoneId UTC_ZONE = ZoneId.of("UTC"); + + /** + * The {@link DateTimeFormatter} for interpreting the timestamps from the native + * API. + */ + private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern(DATE_TIME_PATTERN); + + /** + * The next lock token ID. + */ + private static long nextTokenId = 1L; + + /** + * The server key for this this server. + */ + private static final String LOCAL_HOST_KEY = formatHostKey(); + + /** + * The {@link LockScope} for this instance. + */ + private LockScope scope; + + /** + * The key that identifies the process on the server/host on where the lock was + * obtained. + */ + private String processKey; + + /** + * The key that identifies the host on which the lock was obtained. + */ + private String hostKey; + + /** + * The unique sequential numeric ID for this instance token instance. + */ + private long tokenId; + + /** + * The token key for this token which encapsulates the unique ID and the + * timestamp when it was created. + */ + private String tokenKey; + + /** + * The timestamp for when this instance was constructed. + */ + private Instant timestamp; + + /** + * Returns the next token ID. + * + * @return The next unique token ID. + */ + private synchronized long getNextTokenId() { + return nextTokenId++; } - return sb.toString(); - } - - /** - * Calculates the server key using the network interfaces. This happens - * once when the class is loaded is reused throughout. - * - * @return The server key encoding the IP and MAC addresses for the server. 
- */ - private static String formatHostKey() { - try { - LinkedHashMap macAddrMap = new LinkedHashMap<>(); - LinkedList interfaces = new LinkedList<>(); - Enumeration allInterfaces - = NetworkInterface.getNetworkInterfaces(); - - while (allInterfaces.hasMoreElements()) { - NetworkInterface netInterface = allInterfaces.nextElement(); - if (netInterface.getHardwareAddress() == null) { - continue; - } - if (!netInterface.isUp()) { - continue; - } - if (netInterface.isVirtual()) { - continue; - } - if (netInterface.isPointToPoint()) { - continue; - } - Enumeration addrEnum = netInterface.getInetAddresses(); - if (!addrEnum.hasMoreElements()) { - continue; + /** + * Constructs with the specified {@link LockScope}. + * + * @param scope The {@link LockScope} for the token. + */ + public LockToken(LockScope scope) { + Objects.requireNonNull(scope, "The scope cannot be null"); + this.scope = scope; + ProcessHandle procHandle = ProcessHandle.current(); + long processId = procHandle.pid(); + this.processKey = String.valueOf(processId); + ProcessHandle.Info procInfo = procHandle.info(); + Optional startInstant = procInfo.startInstant(); + if (startInstant.isPresent()) { + ZonedDateTime startTime = startInstant.get().atZone(UTC_ZONE); + + this.processKey = this.processKey + "#" + DATE_TIME_FORMATTER.format(startTime); } + this.hostKey = LOCAL_HOST_KEY; - String macAddr = getMacAddress(netInterface); - Integer count = macAddrMap.get(macAddr); - if (count == null) { - macAddrMap.put(macAddr, 1); - } else { - macAddrMap.put(macAddr, count + 1); - } - interfaces.add(netInterface); - } - - String prefix = "("; - StringBuilder sb = new StringBuilder(); - for (NetworkInterface netInterface : interfaces) { - String macAddr = getMacAddress(netInterface); - Integer count = macAddrMap.get(macAddr); - if (count > 1) { - continue; - } + Instant now = Instant.now(); + this.tokenId = getNextTokenId(); + + this.tokenKey = "[" + this.tokenId + "#" + this.scope + "#" + 
DATE_TIME_FORMATTER.format(now.atZone(UTC_ZONE)) + + " ] @ [ " + this.processKey + " ] @ [ " + this.hostKey + " ]"; - Enumeration addresses = netInterface.getInetAddresses(); - while (addresses.hasMoreElements()) { - InetAddress inetAddr = addresses.nextElement(); - sb.append(prefix); - sb.append(inetAddr.getHostAddress()); - prefix = ", "; + this.timestamp = now; + } + + /** + * Gets the {@linkS String} representation of the MAC address for the specified + * {@link NetworkInterface}. + * + * @param netInterface The {@link NetworkInterface} for which to get the MAC + * address. + * @return The {@link String} representation of the MAC address. + */ + private static String getMacAddress(NetworkInterface netInterface) throws SocketException { + byte[] mac = netInterface.getHardwareAddress(); + StringBuilder sb = new StringBuilder(); + String prefix = ""; + for (byte b : mac) { + sb.append(prefix).append(String.format("%02X", b)); + prefix = "-"; } - sb.append(")#").append(macAddr); - prefix = " / ("; - } + return sb.toString(); + } - return sb.toString(); - } catch (SocketException e) { - throw new RuntimeException(e); + /** + * Calculates the server key using the network interfaces. This happens once + * when the class is loaded is reused throughout. + * + * @return The server key encoding the IP and MAC addresses for the server. 
+ */ + private static String formatHostKey() { + try { + LinkedHashMap macAddrMap = new LinkedHashMap<>(); + LinkedList interfaces = new LinkedList<>(); + Enumeration allInterfaces = NetworkInterface.getNetworkInterfaces(); + + while (allInterfaces.hasMoreElements()) { + NetworkInterface netInterface = allInterfaces.nextElement(); + if (netInterface.getHardwareAddress() == null) { + continue; + } + if (!netInterface.isUp()) { + continue; + } + if (netInterface.isVirtual()) { + continue; + } + if (netInterface.isPointToPoint()) { + continue; + } + + Enumeration addrEnum = netInterface.getInetAddresses(); + if (!addrEnum.hasMoreElements()) { + continue; + } + + String macAddr = getMacAddress(netInterface); + Integer count = macAddrMap.get(macAddr); + if (count == null) { + macAddrMap.put(macAddr, 1); + } else { + macAddrMap.put(macAddr, count + 1); + } + interfaces.add(netInterface); + } + + String prefix = "("; + StringBuilder sb = new StringBuilder(); + for (NetworkInterface netInterface : interfaces) { + String macAddr = getMacAddress(netInterface); + Integer count = macAddrMap.get(macAddr); + if (count > 1) { + continue; + } + + Enumeration addresses = netInterface.getInetAddresses(); + while (addresses.hasMoreElements()) { + InetAddress inetAddr = addresses.nextElement(); + sb.append(prefix); + sb.append(inetAddr.getHostAddress()); + prefix = ", "; + } + sb.append(")#").append(macAddr); + prefix = " / ("; + } + + return sb.toString(); + } catch (SocketException e) { + throw new RuntimeException(e); + } } - } - - /** - * Gets the {@link LockScope} describing the scope of the lock identified by - * this lock token. - * - * @return The {@link LockScope} describing the scope of the lock identified - * by this lock token. - */ - public LockScope getScope() { - return this.scope; - } - - /** - * Gets the unique (within process) sequential token ID for this instance. - * - * @return The unique (within process) sequential token ID for this instance. 
- */ - public long getTokenId() { - return this.tokenId; - } - - /** - * Gets the {@link Instant} timestamp when this instance was constructed. - * - * @return The {@link Instant} timestamp when this instance was constructed. - */ - public Instant getTimestamp() { - return this.timestamp; - } - - /** - * Gets the encoded {@link String} key for the process in which the lock - * token instance was originally constructed. - * - * @return The encoded {@link String} key for the process in which the lock - * token instance was originally constructed. - */ - public String getProcessKey() { - return this.processKey; - } - - /** - * Gets the encoded {@link String} key for the host/server on which the lock - * token instance was originally constructed. - * - * @return The encoded {@link String} key for the host/server on which the - * lock token instance was originally constructed. - */ - public String getHostKey() { - return this.hostKey; - } - - /** - * Gets the full formatted token key which formats the elements of this - * lock token into a unique descriptive {@link String} describing when, - * where and how the resource is locked. This can be used to uniquely - * represent this {@link LockToken} as a {@link String}. - * - * @return The unique token key for this instance. - */ - public String getTokenKey() { - return this.tokenKey; - } - - /** - * Overridden to return true if and only if the specified - * parameter is a non-null reference to an object of the same class with - * equivalent properties. - * - * @param obj The object to compare with. - * @return true if the objects are equal, otherwise - * false. - */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; + + /** + * Gets the {@link LockScope} describing the scope of the lock identified by + * this lock token. + * + * @return The {@link LockScope} describing the scope of the lock identified by + * this lock token. 
+ */ + public LockScope getScope() { + return this.scope; } - if (this == obj) { - return true; + + /** + * Gets the unique (within process) sequential token ID for this instance. + * + * @return The unique (within process) sequential token ID for this instance. + */ + public long getTokenId() { + return this.tokenId; } - if (obj.getClass() != this.getClass()) { - return false; + + /** + * Gets the {@link Instant} timestamp when this instance was constructed. + * + * @return The {@link Instant} timestamp when this instance was constructed. + */ + public Instant getTimestamp() { + return this.timestamp; } - LockToken that = (LockToken) obj; - if (!Objects.equals(this.getScope(), that.getScope())) { - return false; + + /** + * Gets the encoded {@link String} key for the process in which the lock token + * instance was originally constructed. + * + * @return The encoded {@link String} key for the process in which the lock + * token instance was originally constructed. + */ + public String getProcessKey() { + return this.processKey; } - if (!Objects.equals(this.getTokenId(), that.getTokenId())) { - return false; + + /** + * Gets the encoded {@link String} key for the host/server on which the lock + * token instance was originally constructed. + * + * @return The encoded {@link String} key for the host/server on which the lock + * token instance was originally constructed. + */ + public String getHostKey() { + return this.hostKey; } - if (!Objects.equals(this.getTimestamp(), that.getTimestamp())) { - return false; + + /** + * Gets the full formatted token key which formats the elements of this lock + * token into a unique descriptive {@link String} describing when, where and how + * the resource is locked. This can be used to uniquely represent this + * {@link LockToken} as a {@link String}. + * + * @return The unique token key for this instance. 
+ */ + public String getTokenKey() { + return this.tokenKey; } - if (!Objects.equals(this.getProcessKey(), that.getProcessKey())) { - return false; + + /** + * Overridden to return true if and only if the specified parameter + * is a non-null reference to an object of the same class with equivalent + * properties. + * + * @param obj The object to compare with. + * @return true if the objects are equal, otherwise + * false. + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (this == obj) { + return true; + } + if (obj.getClass() != this.getClass()) { + return false; + } + LockToken that = (LockToken) obj; + if (!Objects.equals(this.getScope(), that.getScope())) { + return false; + } + if (!Objects.equals(this.getTokenId(), that.getTokenId())) { + return false; + } + if (!Objects.equals(this.getTimestamp(), that.getTimestamp())) { + return false; + } + if (!Objects.equals(this.getProcessKey(), that.getProcessKey())) { + return false; + } + if (!Objects.equals(this.getHostKey(), that.getHostKey())) { + return false; + } + if (!Objects.equals(this.getTokenKey(), that.getTokenKey())) { + return false; + } + return true; } - if (!Objects.equals(this.getHostKey(), that.getHostKey())) { - return false; + + /** + * Overridden to return a hash code that is consistent with the + * {@link #equals(Object)} method. + * + * @return The hash code for this instance. + */ + @Override + public int hashCode() { + return Objects.hash(this.getScope(), this.getTokenId(), this.getTimestamp(), this.getProcessKey(), + this.getHostKey(), this.getTokenKey()); } - if (!Objects.equals(this.getTokenKey(), that.getTokenKey())) { - return false; + + /** + * Overridden to return the result from {@link #getTokenKey()}. + * + * @return The result from {@link #getTokenKey()}. 
+ */ + public String toString() { + return this.getTokenKey(); } - return true; - } - - /** - * Overridden to return a hash code that is consistent with the {@link - * #equals(Object)} method. - * - * @return The hash code for this instance. - */ - @Override - public int hashCode() { - return Objects.hash(this.getScope(), - this.getTokenId(), - this.getTimestamp(), - this.getProcessKey(), - this.getHostKey(), - this.getTokenKey()); - } - - /** - * Overridden to return the result from {@link #getTokenKey()}. - * - * @return The result from {@link #getTokenKey()}. - */ - public String toString() { - return this.getTokenKey(); - } - - /** - * Test main method to create tokens and print out their token keys. - * @param args The command-line arguments. - */ - public static void main(String[] args) { - try { - LockScope[] scopes = LockScope.values(); - for (int index = 0; index < 10; index++) { - LockToken lockToken = new LockToken(scopes[index % 3]); - System.out.println(); - System.out.println("---------------------------------------------"); - System.out.println(lockToken); - } - } catch (Exception e) { - e.printStackTrace(); + + /** + * Test main method to create tokens and print out their token keys. + * + * @param args The command-line arguments. 
+ */ + public static void main(String[] args) { + try { + LockScope[] scopes = LockScope.values(); + for (int index = 0; index < 10; index++) { + LockToken lockToken = new LockToken(scopes[index % 3]); + System.out.println(); + System.out.println("---------------------------------------------"); + System.out.println(lockToken); + } + } catch (Exception e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + } } - } } diff --git a/src/main/java/com/senzing/listener/service/locking/ResourceKey.java b/src/main/java/com/senzing/listener/service/locking/ResourceKey.java index 1f186fc..0fa68ac 100644 --- a/src/main/java/com/senzing/listener/service/locking/ResourceKey.java +++ b/src/main/java/com/senzing/listener/service/locking/ResourceKey.java @@ -12,258 +12,255 @@ import java.util.Objects; import static com.senzing.io.IOUtilities.UTF_8; +import static com.senzing.util.LoggingUtilities.formatStackTrace; /** * Provides a key for identifying a resource that can be locked via the * {@link LockingService}. */ public final class ResourceKey implements Serializable, Comparable { - /** - * The resource type. - */ - private String resourceType = null; + /** + * The resource type. + */ + private String resourceType = null; - /** - * The unmodifiable {@link List} of components that more specifically - * identify the resource. - */ - private List components = null; + /** + * The unmodifiable {@link List} of components that more specifically + * identify the resource. + */ + private List components = null; - /** - * Constructs with the {@link String} key identifying the type of resource - * (e.g.: "RECORD", "ENTITY", or - * "REPORT") followed by zero or more key components that more - * specifically identify the resource within the type of resource (e.g.: an - * entity ID or a data source code followed by a record ID). - * - * @param resourceType The type of resource being identified. 
- * @param components Zero or more key components that more specifically - * identify the resource. - * @throws NullPointerException If the resource type is null. - */ - public ResourceKey(String resourceType, String... components) { - Objects.requireNonNull(resourceType); - this.resourceType = resourceType; - this.components = (components == null) ? List.of() : List.of(components); - } - - /** - * Constructs with the {@link String} key identifying the type of resource - * (e.g.: "RECORD", "ENTITY", or - * "REPORT") followed by zero or more key components that more - * specifically identify the resource within the type of resource (e.g.: an - * entity ID or a data source code followed by a record ID). This constructor - * version converts the one or more {@link Object} component instances to - * {@link String} instances. - * - * @param resourceType The type of resource being identified. - * @param components Zero or more key components that more specifically - * identify the resource. - * @throws NullPointerException If the resource type is null. - */ - public ResourceKey(String resourceType, Object... components) { - Objects.requireNonNull(resourceType); - this.resourceType = resourceType; - if (components == null) { - this.components = List.of(); - } else { - this.components = new ArrayList<>(components.length); - for (Object comp : components) { - this.components.add(String.valueOf(comp)); - } - this.components = Collections.unmodifiableList(this.components); + /** + * Constructs with the {@link String} key identifying the type of resource + * (e.g.: "RECORD", "ENTITY", or + * "REPORT") followed by zero or more key components that more + * specifically identify the resource within the type of resource (e.g.: an + * entity ID or a data source code followed by a record ID). + * + * @param resourceType The type of resource being identified. + * @param components Zero or more key components that more specifically + * identify the resource. 
+ * @throws NullPointerException If the resource type is null. + */ + public ResourceKey(String resourceType, String... components) { + Objects.requireNonNull(resourceType); + this.resourceType = resourceType; + this.components = (components == null) ? List.of() : List.of(components); } - } - - /** - * Returns the resource type that identifies the type of resource. Examples - * may be "ENTITY", "RECORD" or - * "REPORT". - * - * @return The resource type that identifies the type of resource. - */ - public String getResourceType() { - return this.resourceType; - } - - /** - * Returns the unmodifiable {@link List} of {@link String} components - * that more specifically identify the resource being locked within the - * resource type. The returned {@link List} may be empty, but will not - * be null; however, elements in the {@link List} may be - * null. - * - * @return The unmodifiable {@link List} of {@link String} components - * that more specifically identify the resource being locked within - * the resource type. - */ - public List getComponents() { - return this.components; - } - /** - * Overridden to produce a hash code for this instance based on the resource - * type and component parts. - * - * @return The hash code for this instance. - */ - @Override - public int hashCode() { - return this.resourceType.hashCode() ^ Objects.hash(this.components); - } - - /** - * Overridden to return true if and only if the specified - * parameter is a non-null reference to an instance of the same class with - * an equivalent resource type and all component parts in the same order. - * - * @param obj The object to compare with. - * @return true if the objects are equal, otherwise - * false. 
- */ - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (this == obj) { - return true; - } - if (this.getClass() != obj.getClass()) { - return false; + /** + * Constructs with the {@link String} key identifying the type of resource + * (e.g.: "RECORD", "ENTITY", or + * "REPORT") followed by zero or more key components that more + * specifically identify the resource within the type of resource (e.g.: an + * entity ID or a data source code followed by a record ID). This constructor + * version converts the one or more {@link Object} component instances to + * {@link String} instances. + * + * @param resourceType The type of resource being identified. + * @param components Zero or more key components that more specifically + * identify the resource. + * @throws NullPointerException If the resource type is null. + */ + public ResourceKey(String resourceType, Object... components) { + Objects.requireNonNull(resourceType); + this.resourceType = resourceType; + if (components == null) { + this.components = List.of(); + } else { + this.components = new ArrayList<>(components.length); + for (Object comp : components) { + this.components.add(String.valueOf(comp)); + } + this.components = Collections.unmodifiableList(this.components); + } } - ResourceKey key = (ResourceKey) obj; - return Objects.equals(this.getResourceType(), key.getResourceType()) - && Objects.equals(this.getComponents(), key.getComponents()); - } - /** - * Overridden to return a result consistent with the {@link #equals(Object)} - * method that orders resource keys first by their resource types and then - * by their component identifying parts according to {@link - * String#compareTo(String)}. - * - * @param key The {@link ResourceKey} to compare with. - * @return A negative number is less-than, zero (0) if equal and a positive - * number if greater than the specified instance. 
- */ - @Override - public int compareTo(ResourceKey key) { - int diff = this.getResourceType().compareTo(key.getResourceType()); - if (diff != 0) { - return diff; + /** + * Returns the resource type that identifies the type of resource. Examples may + * be "ENTITY", "RECORD" or "REPORT". + * + * @return The resource type that identifies the type of resource. + */ + public String getResourceType() { + return this.resourceType; } - List comp1 = this.getComponents(); - List comp2 = key.getComponents(); - diff = comp1.size() - comp2.size(); - if (diff != 0) { - return diff; + + /** + * Returns the unmodifiable {@link List} of {@link String} components + * that more specifically identify the resource being locked within the resource + * type. The returned {@link List} may be empty, but will not be + * null; however, elements in the {@link List} may be + * null. + * + * @return The unmodifiable {@link List} of {@link String} components + * that more specifically identify the resource being locked within the + * resource type. + */ + public List getComponents() { + return this.components; } - int count = comp1.size(); - for (int index = 0; index < count; index++) { - String s1 = comp1.get(index); - String s2 = comp2.get(index); - if (Objects.equals(s1, s2)) { - continue; - } - if (s1 == null && s2 != null) { - return -1; - } - if (s1 != null && s2 == null) { - return 1; - } - diff = s1.compareTo(s2); - if (diff != 0) { - return diff; - } + + /** + * Overridden to produce a hash code for this instance based on the resource + * type and component parts. + * + * @return The hash code for this instance. + */ + @Override + public int hashCode() { + return this.resourceType.hashCode() ^ Objects.hash(this.components); } - return 0; - } - /** - * Overridden to return a {@link String} representation of this instance. - * - * @return A {@link String} representation of this instance. 
- */ - @Override - public String toString() { - try { - StringBuilder sb = new StringBuilder(); - sb.append(URLEncoder.encode(this.getResourceType(), UTF_8)); - for (String comp : this.getComponents()) { - sb.append(":").append(URLEncoder.encode(comp, UTF_8)); - } - return sb.toString(); - } catch (UnsupportedEncodingException cannotHappen) { - throw new IllegalStateException("UTF-8 Encoding is not supported"); + /** + * Overridden to return true if and only if the specified parameter + * is a non-null reference to an instance of the same class with an equivalent + * resource type and all component parts in the same order. + * + * @param obj The object to compare with. + * @return true if the objects are equal, otherwise + * false. + */ + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (this == obj) { + return true; + } + if (this.getClass() != obj.getClass()) { + return false; + } + ResourceKey key = (ResourceKey) obj; + return Objects.equals(this.getResourceType(), key.getResourceType()) + && Objects.equals(this.getComponents(), key.getComponents()); } - } - /** - * Parses the encoded text as a {@link ResourceKey}. The encoding is done - * by the {@link #toString()} method. This method returns null - * if the specified parameter is null. - * - * @param text The text to parse. - * - * @return The {@link ResourceKey} parsed from the specified text, or - * null if the specified parameter is null. - * @throws IllegalArgumentException If the specified parameter is an empty - * string after trimming leading and trailing - * white space. 
- */ - public static ResourceKey parse(String text) - throws IllegalArgumentException { - // return null if parameter is null - if (text == null) { - return null; + /** + * Overridden to return a result consistent with the {@link #equals(Object)} + * method that orders resource keys first by their resource types and then by + * their component identifying parts according to + * {@link String#compareTo(String)}. + * + * @param key The {@link ResourceKey} to compare with. + * @return A negative number is less-than, zero (0) if equal and a positive + * number if greater than the specified instance. + */ + @Override + public int compareTo(ResourceKey key) { + int diff = this.getResourceType().compareTo(key.getResourceType()); + if (diff != 0) { + return diff; + } + List comp1 = this.getComponents(); + List comp2 = key.getComponents(); + diff = comp1.size() - comp2.size(); + if (diff != 0) { + return diff; + } + int count = comp1.size(); + for (int index = 0; index < count; index++) { + String s1 = comp1.get(index); + String s2 = comp2.get(index); + if (Objects.equals(s1, s2)) { + continue; + } + if (s1 == null && s2 != null) { + return -1; + } + if (s1 != null && s2 == null) { + return 1; + } + diff = s1.compareTo(s2); + if (diff != 0) { + return diff; + } + } + return 0; } - // trim leading and trailing whitespace - text = text.trim(); - if (text.length() == 0) { - throw new IllegalArgumentException( - "The specified text cannot be an empty string or only whitespace."); + /** + * Overridden to return a {@link String} representation of this instance. + * + * @return A {@link String} representation of this instance. 
+ */ + @Override + public String toString() { + try { + StringBuilder sb = new StringBuilder(); + sb.append(URLEncoder.encode(this.getResourceType(), UTF_8)); + for (String comp : this.getComponents()) { + sb.append(":").append(URLEncoder.encode(comp, UTF_8)); + } + return sb.toString(); + } catch (UnsupportedEncodingException cannotHappen) { + throw new IllegalStateException("UTF-8 Encoding is not supported"); + } } - String[] tokens = text.split(":"); - try { - String resourceType = URLDecoder.decode(tokens[0], UTF_8); - String[] components = new String[tokens.length - 1]; - for (int index = 1; index < tokens.length; index++) { - components[index - 1] = URLDecoder.decode(tokens[index], UTF_8); - } - return new ResourceKey(resourceType, components); + /** + * Parses the encoded text as a {@link ResourceKey}. The encoding is done by the + * {@link #toString()} method. This method returns null if the + * specified parameter is null. + * + * @param text The text to parse. + * + * @return The {@link ResourceKey} parsed from the specified text, or + * null if the specified parameter is null. + * @throws IllegalArgumentException If the specified parameter is an empty + * string after trimming leading and trailing + * white space. 
+ */ + public static ResourceKey parse(String text) throws IllegalArgumentException { + // return null if parameter is null + if (text == null) { + return null; + } - } catch (UnsupportedEncodingException cannotHappen) { - throw new IllegalStateException("UTF-8 supporting is not supported."); - } + // trim leading and trailing whitespace + text = text.trim(); + if (text.length() == 0) { + throw new IllegalArgumentException("The specified text cannot be an empty string or only whitespace."); + } - } + String[] tokens = text.split(":"); + try { + String resourceType = URLDecoder.decode(tokens[0], UTF_8); + String[] components = new String[tokens.length - 1]; + for (int index = 1; index < tokens.length; index++) { + components[index - 1] = URLDecoder.decode(tokens[index], UTF_8); + } + return new ResourceKey(resourceType, components); + + } catch (UnsupportedEncodingException cannotHappen) { + throw new IllegalStateException("UTF-8 supporting is not supported."); + } + + } - /** - * Simple test main that constructs a {@link ResourceKey} and converts it to - * a {@link String}. - * - * @param args The command-line arguments. - */ - public static void main(String[] args) { - try { - if (args.length == 0) { - System.err.println("Required parameters: *"); - System.exit(1); - } - String resourceType = args[0]; - String[] components = (args.length > 1) - ? CommandLineUtilities.shiftArguments(args, 1) - : new String[0]; + /** + * Simple test main that constructs a {@link ResourceKey} and converts it to a + * {@link String}. + * + * @param args The command-line arguments. + */ + public static void main(String[] args) { + try { + if (args.length == 0) { + System.err.println("Required parameters: *"); + System.exit(1); + } + String resourceType = args[0]; + String[] components = (args.length > 1) ? 
CommandLineUtilities.shiftArguments(args, 1) : new String[0]; - ResourceKey key = new ResourceKey(resourceType, components); + ResourceKey key = new ResourceKey(resourceType, components); - System.out.println(key); + System.out.println(key); - } catch (Exception e) { - e.printStackTrace(); + } catch (Exception e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + } } - } } diff --git a/src/main/java/com/senzing/listener/service/scheduling/AbstractSQLSchedulingService.java b/src/main/java/com/senzing/listener/service/scheduling/AbstractSQLSchedulingService.java index fab03ab..be8dbee 100644 --- a/src/main/java/com/senzing/listener/service/scheduling/AbstractSQLSchedulingService.java +++ b/src/main/java/com/senzing/listener/service/scheduling/AbstractSQLSchedulingService.java @@ -21,759 +21,683 @@ /** * Implements {@link SchedulingService} using a SQLite database to handle - * persisting the follow-up tasks by extending {@link - * AbstractSchedulingService}. + * persisting the follow-up tasks by extending + * {@link AbstractSchedulingService}. */ -public abstract class AbstractSQLSchedulingService - extends AbstractSchedulingService -{ - /** - * The {@link Calendar} to use for retrieving timestamps from the database. - */ - private static final Calendar UTC_CALENDAR - = Calendar.getInstance(TimeZone.getTimeZone("UTC")); - - /** - * The initialization parameter key for checking if the persistent store - * of follow-up tasks should be dropped / deleted and recreated during - * initialization. Values should be true or false. - */ - public static final String CLEAN_DATABASE_KEY = "cleanDatabase"; - - /** - * The initialization parameter key for obtaining the {@link - * ConnectionProvider} to use for connecting to the database from the - * {@link ConnectionProvider#REGISTRY}. - */ - public static final String CONNECTION_PROVIDER_KEY = "connectionProvider"; - - /** - * The number of expired follow-up tasks. 
- */ - private long totalExpiredFollowUpTaskCount = 0L; - - /** - * The {@link ConnectionProvider} to use. - */ - private ConnectionProvider connectionProvider; - - /** - * The {@link DatabaseType} for this instance. - */ - private DatabaseType databaseType = null; - - /** - * Default constructor. - */ - protected AbstractSQLSchedulingService() { - // do nothing - } - - /** - * Gets a JDBC {@link Connection} to use. Typically these are obtained from - * a backing pool so repeated calls to this function without closing the - * previously obtained {@link Connection} instances could exhaust the pool. - * This may block until a {@link Connection} is available. - * - * @return The {@link Connection} that was obtained. - * - * @throws SQLException If a JDBC failure occurs. - */ - protected Connection getConnection() throws SQLException { - return this.connectionProvider.getConnection(); - } - - /** - * Overridden to obtain the {@link ConnectionProvider}. - * - * {@inheritDoc} - * - * @param config The {@link JsonObject} describing the configuration. 
- */ - protected void doInit(JsonObject config) - throws ServiceSetupException - { - try { - Boolean clean = getConfigBoolean(config, CLEAN_DATABASE_KEY, FALSE); - - String providerKey = getConfigString(config, - CONNECTION_PROVIDER_KEY, - true); - - - try { - this.connectionProvider = ConnectionProvider.REGISTRY.lookup(providerKey); - } catch (NameNotFoundException e) { - throw new ServiceSetupException( - "No ConnectionProvider was registered to the name specified by the " - + "\"" + CONNECTION_PROVIDER_KEY + "\" initialization parameter: " - + providerKey); - } - - // set the database type - this.databaseType = this.initDatabaseType(); - - // ensure the schema exists - this.ensureSchema(clean); - - } catch (SQLException e) { - throw new ServiceSetupException( - "Failed to connect to database or initialize schema.", e); +public abstract class AbstractSQLSchedulingService extends AbstractSchedulingService { + /** + * The {@link Calendar} to use for retrieving timestamps from the database. + */ + private static final Calendar UTC_CALENDAR = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + + /** + * The initialization parameter key for checking if the persistent store of + * follow-up tasks should be dropped / deleted and recreated during + * initialization. Values should be true or false. + */ + public static final String CLEAN_DATABASE_KEY = "cleanDatabase"; + + /** + * The initialization parameter key for obtaining the {@link ConnectionProvider} + * to use for connecting to the database from the + * {@link ConnectionProvider#REGISTRY}. + */ + public static final String CONNECTION_PROVIDER_KEY = "connectionProvider"; + + /** + * The number of expired follow-up tasks. + */ + private long totalExpiredFollowUpTaskCount = 0L; + + /** + * The {@link ConnectionProvider} to use. + */ + private ConnectionProvider connectionProvider; + + /** + * The {@link DatabaseType} for this instance. + */ + private DatabaseType databaseType = null; + + /** + * Default constructor. 
+ */ + protected AbstractSQLSchedulingService() { + // do nothing } - } - - /** - * Initializes the {@link DatabaseType} to use for formatting SQL statements. - * - * @return The {@link DatabaseType} to use. - * - * @throws SQLException If a failure occurs. - */ - protected DatabaseType initDatabaseType() throws SQLException { - Connection conn = this.getConnection(); - try { - return DatabaseType.detect(conn); - } finally { - conn = close(conn); - } - } - - /** - * Gets the {@link DatabaseType} used by this instance. - * - * @return The {@link DatabaseType} used by this instance. - */ - public DatabaseType getDatabaseType() { - return this.databaseType; - } - - /** - * Ensures the schema exists and alternatively drops the existing the schema - * and recreates it. This is called from {@link #doInit(JsonObject)}. - * - * @param recreate true if the existing schema should be - * dropped, otherwise false. - * - * @throws SQLException If a failure occurs. - */ - protected abstract void ensureSchema(boolean recreate) throws SQLException; - - /** - * Overridden to do nothing. - */ - @Override - protected void doDestroy() { - // do nothing - } - - /** - * Implemented to save the specified follow-up tasks to the backing SQLite - * persistent store. 
- */ - @Override - protected synchronized void enqueueFollowUpTask(Task task) - throws ServiceExecutionException - { - Connection conn = null; - boolean success = false; - try { - // obtain the connection - conn = this.getConnection(); - - // update the multiplicity if the row exists - boolean updated = this.incrementFollowUpMultiplicity(conn, task); - - // check if we updated a row - if (!updated) { - // insert a new row since none was updated - this.insertNewFollowUpTask(conn, task); - } - - // commit the connection - conn.commit(); - success = true; - - } catch (SQLException e) { - e.printStackTrace(); - throw new ServiceExecutionException("JDBC failure occurred", e); - - } finally { - if (!success) { - rollback(conn); - } - conn = close(conn); - } - } - - /** - * Increments the multiplicity for the specified follow-up task in the - * database by updating the associated row if it exists. This - * true if the row existed and was updated, otherwise - * false - * - * @param conn The {@link Connection} to use to connect to the database. - * @param task The {@link Task} describing the row to update. - * @return true if the row existed and was updated, otherwise - * false. - * @throws SQLException If a JDBC failure occurs. - */ - protected boolean incrementFollowUpMultiplicity(Connection conn, Task task) - throws SQLException - { - PreparedStatement ps = null; - try { - // prepare the statement - ps = conn.prepareStatement( - "UPDATE sz_follow_up_tasks " - + "SET multiplicity = multiplicity + 1 " - + "WHERE signature = ? " - + "AND allow_collapse_flag = 1 " - + "AND expire_lease_at IS NULL " - + "AND task_id = (SELECT MAX(task_id) FROM sz_follow_up_tasks " - + "WHERE signature = ? 
AND allow_collapse_flag = 1 " - + "AND expire_lease_at IS NULL)"); - - ps.setString(1, task.getSignature()); - ps.setString(2, task.getSignature()); - - int rowCount = ps.executeUpdate(); - - if (rowCount == 0) { - return false; - } else if (rowCount == 1) { - return true; - } else { - logError("MULTIPLE ROWS UPDATED FOR FOLLOW-UP TASK: ", task); - throw new IllegalStateException( - "Somehow updated multiple rows when updating task multiplicity. " - + "task=[ " + task + " ]"); - } - - } finally { - ps = close(ps); - } - } - - /** - * Inserts a new follow-up task in the database schema. - * - * @param conn The {@link Connection} to use to connect to the database. - * @param task The {@link Task} describing the row to insert. - * @throws SQLException If a JDBC failure occurs. - */ - protected void insertNewFollowUpTask(Connection conn, Task task) - throws SQLException - { - PreparedStatement ps = null; - try { - ps = conn.prepareStatement( - "INSERT INTO sz_follow_up_tasks (" - + "signature, allow_collapse_flag, json_text) VALUES (?, ?, ?)"); - ps.setString(1, task.getSignature()); - ps.setInt(2, (task.isAllowingCollapse() ? 1 : 0)); - ps.setString(3, task.toJsonText()); - - int rowCount = ps.executeUpdate(); - - if (rowCount != 1) { - throw new SQLException( - "Unexpected row count on insert: " + rowCount); - } - } finally { - ps = close(ps); - } - } - - /** - * This message can be used for debugging to dump the contents of the - * follow-up table to standard error. - * - * @throws SQLException If a JDBC failure occurs. 
- */ - protected void dumpFollowUpTable() throws SQLException { - Connection conn = null; - PreparedStatement ps = null; - ResultSet rs = null; - try { - conn = this.getConnection(); - - StringBuilder sb = new StringBuilder(); - sb.append("SELECT task_id, json_text, signature, multiplicity, "); - sb.append("lease_id, expire_lease_at, modified_on, created_on "); - sb.append("FROM sz_follow_up_tasks "); - - ps = conn.prepareStatement(sb.toString()); - - rs = ps.executeQuery(); - - long now = System.currentTimeMillis(); - long delayTime = now - this.getFollowUpDelay(); - long timeoutTime = now - this.getFollowUpTimeout(); - - System.err.println(); - System.err.println("-------------------------------------------------"); - while (rs.next()) { - System.err.println( - rs.getLong(1) + " / " + rs.getString(2) - + " / " + rs.getInt(4) - + " / " + rs.getString(5) - + " / " + rs.getTimestamp(6, UTC_CALENDAR) - + " / " + rs.getTimestamp(7, UTC_CALENDAR) - + " vs " + (new Timestamp(delayTime)) - + " / " + rs.getTimestamp(8, UTC_CALENDAR) - + " vs " + (new Timestamp(timeoutTime))); - } - rs = close(rs); - ps = close(ps); - - } catch (SQLException e) { - e.printStackTrace(); - throw e; - - } finally { - rs = close(rs); - ps = close(ps); - conn = close(conn); + + /** + * Gets a JDBC {@link Connection} to use. Typically these are obtained from a + * backing pool so repeated calls to this function without closing the + * previously obtained {@link Connection} instances could exhaust the pool. This + * may block until a {@link Connection} is available. + * + * @return The {@link Connection} that was obtained. + * + * @throws SQLException If a JDBC failure occurs. + */ + protected Connection getConnection() throws SQLException { + return this.connectionProvider.getConnection(); } - } - - /** - * Fetches at most the specified number of follow-up tasks from the backing - * SQLite persistent store. 
- * - * @param count The suggested number of follow-up tasks to retrieve from - * persistent storage. - * - * @return The {@link List} of dequeued follow-up tasks. - * - * @throws ServiceExecutionException If a failure occurs. - */ - @Override - protected synchronized List dequeueFollowUpTasks(int count) - throws ServiceExecutionException - { - Connection conn = null; - boolean success = false; - try { - // get a connection - conn = this.getConnection(); - - // first release any expired leases - int released = this.releaseExpiredLeases(conn); - - if (released > 0) { - synchronized (this.getStatisticsMonitor()) { - //System.err.println("EXPIRED LEASE ON " + rowCount + " FOLLOW UP TASKS"); - this.totalExpiredFollowUpTaskCount += released; + + /** + * Overridden to obtain the {@link ConnectionProvider}. + * + * {@inheritDoc} + * + * @param config The {@link JsonObject} describing the configuration. + */ + protected void doInit(JsonObject config) throws ServiceSetupException { + try { + Boolean clean = getConfigBoolean(config, CLEAN_DATABASE_KEY, FALSE); + + String providerKey = getConfigString(config, CONNECTION_PROVIDER_KEY, true); + + try { + this.connectionProvider = ConnectionProvider.REGISTRY.lookup(providerKey); + } catch (NameNotFoundException e) { + throw new ServiceSetupException("No ConnectionProvider was registered to the name specified by the " + + "\"" + CONNECTION_PROVIDER_KEY + "\" initialization parameter: " + providerKey); + } + + // set the database type + this.databaseType = this.initDatabaseType(); + + // ensure the schema exists + this.ensureSchema(clean); + + } catch (SQLException e) { + throw new ServiceSetupException("Failed to connect to database or initialize schema.", e); } - } + } - // generate a unique lease ID - String leaseId = this.generateLeaseId(); + /** + * Initializes the {@link DatabaseType} to use for formatting SQL statements. + * + * @return The {@link DatabaseType} to use. + * + * @throws SQLException If a failure occurs. 
+ */ + protected DatabaseType initDatabaseType() throws SQLException { + Connection conn = this.getConnection(); + try { + return DatabaseType.detect(conn); + } finally { + conn = close(conn); + } + } - // lease the follow-up tasks - int leasedCount = this.leaseFollowUpTasks(conn, count, leaseId); + /** + * Gets the {@link DatabaseType} used by this instance. + * + * @return The {@link DatabaseType} used by this instance. + */ + public DatabaseType getDatabaseType() { + return this.databaseType; + } - // this.dumpFollowUpTable(); + /** + * Ensures the schema exists and alternatively drops the existing the schema and + * recreates it. This is called from {@link #doInit(JsonObject)}. + * + * @param recreate true if the existing schema should be dropped, + * otherwise false. + * + * @throws SQLException If a failure occurs. + */ + protected abstract void ensureSchema(boolean recreate) throws SQLException; + + /** + * Overridden to do nothing. + */ + @Override + protected void doDestroy() { + // do nothing + } - // check if no rows were updated - if (leasedCount == 0) { - return new ArrayList<>(0); - } + /** + * Implemented to save the specified follow-up tasks to the backing SQLite + * persistent store. 
+ */ + @Override + protected synchronized void enqueueFollowUpTask(Task task) throws ServiceExecutionException { + Connection conn = null; + boolean success = false; + try { + // obtain the connection + conn = this.getConnection(); + + // update the multiplicity if the row exists + boolean updated = this.incrementFollowUpMultiplicity(conn, task); + + // check if we updated a row + if (!updated) { + // insert a new row since none was updated + this.insertNewFollowUpTask(conn, task); + } + + // commit the connection + conn.commit(); + success = true; + + } catch (SQLException e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + throw new ServiceExecutionException("JDBC failure occurred", e); + + } finally { + if (!success) { + rollback(conn); + } + conn = close(conn); + } + } - // now get the leased rows - List result = this.getLeasedFollowUpTasks(conn, leaseId); + /** + * Increments the multiplicity for the specified follow-up task in the database + * by updating the associated row if it exists. This true if the + * row existed and was updated, otherwise false + * + * @param conn The {@link Connection} to use to connect to the database. + * @param task The {@link Task} describing the row to update. + * @return true if the row existed and was updated, otherwise + * false. + * @throws SQLException If a JDBC failure occurs. + */ + protected boolean incrementFollowUpMultiplicity(Connection conn, Task task) throws SQLException { + PreparedStatement ps = null; + try { + // prepare the statement + ps = conn.prepareStatement("UPDATE sz_follow_up_tasks " + "SET multiplicity = multiplicity + 1 " + + "WHERE signature = ? " + "AND allow_collapse_flag = 1 " + "AND expire_lease_at IS NULL " + + "AND task_id = (SELECT MAX(task_id) FROM sz_follow_up_tasks " + + "WHERE signature = ? 
AND allow_collapse_flag = 1 " + "AND expire_lease_at IS NULL)"); + + ps.setString(1, task.getSignature()); + ps.setString(2, task.getSignature()); + + int rowCount = ps.executeUpdate(); + + if (rowCount == 0) { + return false; + } else if (rowCount == 1) { + return true; + } else { + logError("MULTIPLE ROWS UPDATED FOR FOLLOW-UP TASK: ", task); + throw new IllegalStateException( + "Somehow updated multiple rows when updating task multiplicity. " + "task=[ " + task + " ]"); + } + + } finally { + ps = close(ps); + } + } - // commit the transaction - conn.commit(); - success = true; + /** + * Inserts a new follow-up task in the database schema. + * + * @param conn The {@link Connection} to use to connect to the database. + * @param task The {@link Task} describing the row to insert. + * @throws SQLException If a JDBC failure occurs. + */ + protected void insertNewFollowUpTask(Connection conn, Task task) throws SQLException { + PreparedStatement ps = null; + try { + ps = conn.prepareStatement( + "INSERT INTO sz_follow_up_tasks (" + "signature, allow_collapse_flag, json_text) VALUES (?, ?, ?)"); + ps.setString(1, task.getSignature()); + ps.setInt(2, (task.isAllowingCollapse() ? 1 : 0)); + ps.setString(3, task.toJsonText()); + + int rowCount = ps.executeUpdate(); + + if (rowCount != 1) { + throw new SQLException("Unexpected row count on insert: " + rowCount); + } + } finally { + ps = close(ps); + } + } - // return the result list - return result; + /** + * This message can be used for debugging to dump the contents of the follow-up + * table to standard error. + * + * @throws SQLException If a JDBC failure occurs. 
+ */ + protected void dumpFollowUpTable() throws SQLException { + Connection conn = null; + PreparedStatement ps = null; + ResultSet rs = null; + try { + conn = this.getConnection(); + + StringBuilder sb = new StringBuilder(); + sb.append("SELECT task_id, json_text, signature, multiplicity, "); + sb.append("lease_id, expire_lease_at, modified_on, created_on "); + sb.append("FROM sz_follow_up_tasks "); + + ps = conn.prepareStatement(sb.toString()); + + rs = ps.executeQuery(); + + long now = System.currentTimeMillis(); + long delayTime = now - this.getFollowUpDelay(); + long timeoutTime = now - this.getFollowUpTimeout(); + + System.err.println(); + System.err.println("-------------------------------------------------"); + while (rs.next()) { + System.err.println(rs.getLong(1) + " / " + rs.getString(2) + " / " + rs.getInt(4) + " / " + + rs.getString(5) + " / " + rs.getTimestamp(6, UTC_CALENDAR) + " / " + + rs.getTimestamp(7, UTC_CALENDAR) + " vs " + (new Timestamp(delayTime)) + " / " + + rs.getTimestamp(8, UTC_CALENDAR) + " vs " + (new Timestamp(timeoutTime))); + } + rs = close(rs); + ps = close(ps); + + } catch (SQLException e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + throw e; + + } finally { + rs = close(rs); + ps = close(ps); + conn = close(conn); + } + } - } catch (SQLException e) { - throw new ServiceExecutionException( - "Failed to dequeue follow-up task", e); + /** + * Fetches at most the specified number of follow-up tasks from the backing + * SQLite persistent store. + * + * @param count The suggested number of follow-up tasks to retrieve from + * persistent storage. + * + * @return The {@link List} of dequeued follow-up tasks. + * + * @throws ServiceExecutionException If a failure occurs. 
+ */ + @Override + protected synchronized List dequeueFollowUpTasks(int count) throws ServiceExecutionException { + Connection conn = null; + boolean success = false; + try { + // get a connection + conn = this.getConnection(); + + // first release any expired leases + int released = this.releaseExpiredLeases(conn); + + if (released > 0) { + synchronized (this.getStatisticsMonitor()) { + // System.err.println("EXPIRED LEASE ON " + rowCount + " FOLLOW UP TASKS"); + this.totalExpiredFollowUpTaskCount += released; + } + } + + // generate a unique lease ID + String leaseId = this.generateLeaseId(); + + // lease the follow-up tasks + int leasedCount = this.leaseFollowUpTasks(conn, count, leaseId); + + // this.dumpFollowUpTable(); + + // check if no rows were updated + if (leasedCount == 0) { + return new ArrayList<>(0); + } + + // now get the leased rows + List result = this.getLeasedFollowUpTasks(conn, leaseId); + + // commit the transaction + conn.commit(); + success = true; + + // return the result list + return result; + + } catch (SQLException e) { + throw new ServiceExecutionException("Failed to dequeue follow-up task", e); + + } finally { + if (!success) { + rollback(conn); + } + conn = close(conn); + } + } - } finally { - if (!success) { - rollback(conn); - } - conn = close(conn); + /** + * Releases any previously obtained leases on follow tasks that have expired. + * This makes it possible to retrieve those follow-up tasks again from the + * database. The assumption is that if the lease has expired then they are no + * longer enqueued for processing and the lease is probably from an aborted + * process that is no longer running. + * + * @param conn The {@link Connection} to use. + * @return The number of tasks for which the leases had expired. + * @throws SQLException If a JDBC failure occurs. 
+ */ + protected int releaseExpiredLeases(Connection conn) throws SQLException { + { + DatabaseType dbType = this.getDatabaseType(); + + PreparedStatement ps = null; + try { + // first release the lease on any task where the lease has expired + ps = conn.prepareStatement("UPDATE sz_follow_up_tasks " + "SET lease_id = NULL, expire_lease_at = NULL " + + "WHERE lease_id IS NOT NULL " + "AND expire_lease_at < " + dbType.getTimestampBindingSQL()); + + // don't be too aggressive on expiring leases + long now = System.currentTimeMillis(); + long leaseExpire = now - (this.getFollowUpTimeout() / 2); + Timestamp expireTime = new Timestamp(leaseExpire); + + dbType.setTimestamp(ps, 1, expireTime); + + return ps.executeUpdate(); + + } finally { + ps = close(ps); + } + } } - } - - /** - * Releases any previously obtained leases on follow tasks that have expired. - * This makes it possible to retrieve those follow-up tasks again from the - * database. The assumption is that if the lease has expired then they are - * no longer enqueued for processing and the lease is probably from an - * aborted process that is no longer running. - * - * @param conn The {@link Connection} to use. - * @return The number of tasks for which the leases had expired. - * @throws SQLException If a JDBC failure occurs. 
- */ - protected int releaseExpiredLeases(Connection conn) - throws SQLException - { - { - DatabaseType dbType = this.getDatabaseType(); - - PreparedStatement ps = null; - try { - // first release the lease on any task where the lease has expired - ps = conn.prepareStatement( - "UPDATE sz_follow_up_tasks " - + "SET lease_id = NULL, expire_lease_at = NULL " - + "WHERE lease_id IS NOT NULL " - + "AND expire_lease_at < " + dbType.getTimestampBindingSQL()); - - // don't be too aggressive on expiring leases - long now = System.currentTimeMillis(); - long leaseExpire = now - (this.getFollowUpTimeout() / 2); - Timestamp expireTime = new Timestamp(leaseExpire); - - dbType.setTimestamp(ps, 1, expireTime); - - return ps.executeUpdate(); - - } finally { - ps = close(ps); - } + + /** + * Marks the specified number of unleased follow-up tasks as leased with the + * specified lease ID using the specified {@link Connection}. + * + * @param conn The {@link Connection} to use. + * @param limit The upper-limit on the number of follow-up tasks to lease. + * @param leaseId The lease ID to use for marking the tasks as leased. + * @return The actual number of follow-up tasks that were leased. + * @throws SQLException If a JDBC failure occurs. + */ + protected int leaseFollowUpTasks(Connection conn, int limit, String leaseId) throws SQLException { + DatabaseType dbType = this.getDatabaseType(); + + PreparedStatement ps = null; + try { + // count the non-follow-up tasks + int taskCount = this.getPendingTaskCount() + this.getPostponedTaskCount(); + if (taskCount == 0) { + logDebug("FOLLOW-UP LEASE: Foregoing full follow-up delay since " + + "it appears there are no other tasks to handle."); + } + long followUpOffset = (taskCount == 0) ? 
0L : this.getFollowUpDelay(); + + // don't be too aggressive on expiring leases + long now = System.currentTimeMillis(); + long delayMillis = now - followUpOffset; + long timeoutMillis = now - this.getFollowUpTimeout(); + Timestamp delayTime = new Timestamp(delayMillis); + Timestamp timeoutTime = new Timestamp(timeoutMillis); + + // now let's lease some new follow up tasks + ps = conn.prepareStatement("UPDATE sz_follow_up_tasks " + "SET lease_id = ?, " + "expire_lease_at = " + + dbType.getTimestampBindingSQL() + " " + + "WHERE task_id IN (SELECT task_id FROM sz_follow_up_tasks " + + "WHERE lease_id IS NULL AND expire_lease_at IS NULL " + "AND (modified_on < " + + dbType.getTimestampBindingSQL() + " " + "OR created_on < " + dbType.getTimestampBindingSQL() + + ") " + "ORDER BY created_on " + "LIMIT ?)"); + + long leaseExpire = now + (2 * this.getFollowUpTimeout()); + Timestamp expireTime = new Timestamp(leaseExpire); + + ps.setString(1, leaseId); + dbType.setTimestamp(ps, 2, expireTime); + dbType.setTimestamp(ps, 3, delayTime); + dbType.setTimestamp(ps, 4, timeoutTime); + ps.setInt(5, limit); + + // execute the update and return the number of affected rows + return ps.executeUpdate(); + + } finally { + ps = close(ps); + } } - } - - /** - * Marks the specified number of unleased follow-up tasks as leased with the - * specified lease ID using the specified {@link Connection}. - * - * @param conn The {@link Connection} to use. - * @param limit The upper-limit on the number of follow-up tasks to lease. - * @param leaseId The lease ID to use for marking the tasks as leased. - * @return The actual number of follow-up tasks that were leased. - * @throws SQLException If a JDBC failure occurs. 
- */ - protected int leaseFollowUpTasks(Connection conn, int limit, String leaseId) - throws SQLException - { - DatabaseType dbType = this.getDatabaseType(); - - PreparedStatement ps = null; - try { - // count the non-follow-up tasks - int taskCount = this.getPendingTaskCount() + this.getPostponedTaskCount(); - if (taskCount == 0) { - logDebug("FOLLOW-UP LEASE: Foregoing full follow-up delay since " - + "it appears there are no other tasks to handle."); - } - long followUpOffset = (taskCount == 0) ? 0L : this.getFollowUpDelay(); - - // don't be too aggressive on expiring leases - long now = System.currentTimeMillis(); - long delayMillis = now - followUpOffset; - long timeoutMillis = now - this.getFollowUpTimeout(); - Timestamp delayTime = new Timestamp(delayMillis); - Timestamp timeoutTime = new Timestamp(timeoutMillis); - - // now let's lease some new follow up tasks - ps = conn.prepareStatement( - "UPDATE sz_follow_up_tasks " - + "SET lease_id = ?, " - + "expire_lease_at = " + dbType.getTimestampBindingSQL() + " " - + "WHERE task_id IN (SELECT task_id FROM sz_follow_up_tasks " - + "WHERE lease_id IS NULL AND expire_lease_at IS NULL " - + "AND (modified_on < " + dbType.getTimestampBindingSQL() + " " - + "OR created_on < " + dbType.getTimestampBindingSQL() + ") " - + "ORDER BY created_on " - + "LIMIT ?)"); - - long leaseExpire = now + (2 * this.getFollowUpTimeout()); - Timestamp expireTime = new Timestamp(leaseExpire); - - ps.setString(1, leaseId); - dbType.setTimestamp(ps, 2, expireTime); - dbType.setTimestamp(ps, 3, delayTime); - dbType.setTimestamp(ps, 4, timeoutTime); - ps.setInt(5, limit); - - // execute the update and return the number of affected rows - return ps.executeUpdate(); - - } finally { - ps = close(ps); + + /** + * Gets the {@link List} of {@link ScheduledTask} instances describing the + * follow-up tasks in the database that are marked as leased with the specified + * lease ID. + * + * @param conn The {@link Connection} to use. 
+ * @param leaseId The lease ID of the follow-up tasks to retrieve. + * @return The {@link List} of {@link ScheduledTask} instances describing the + * leased follow-up tasks. + * @throws SQLException If a JDBC failure occurs. + */ + protected List getLeasedFollowUpTasks(Connection conn, String leaseId) throws SQLException { + PreparedStatement ps = null; + ResultSet rs = null; + try { + // set the leased rows + ps = conn.prepareStatement("SELECT " + "task_id, expire_lease_at, multiplicity, json_text, created_on " + + "FROM sz_follow_up_tasks " + "WHERE lease_id = ?"); + + ps.setString(1, leaseId); + + int fetchCount = this.getFollowUpFetchCount(); + List result = new ArrayList<>(fetchCount); + rs = ps.executeQuery(); + while (rs.next()) { + long taskId = rs.getLong(1); + Timestamp expTime = rs.getTimestamp(2, UTC_CALENDAR); + int multiplicity = rs.getInt(3); + String jsonText = rs.getString(4); + Timestamp createdOn = rs.getTimestamp(5, UTC_CALENDAR); + + String followUpId = taskId + ":" + leaseId; + long now = System.currentTimeMillis(); + long elapsedSinceCreation = now - createdOn.getTime(); + + ScheduledTask task = new ScheduledTask(jsonText, followUpId, multiplicity, expTime.getTime(), + elapsedSinceCreation); + + result.add(task); + } + + // return the result list + return result; + + } finally { + rs = close(rs); + ps = close(ps); + } } - } - - - /** - * Gets the {@link List} of {@link ScheduledTask} instances describing the - * follow-up tasks in the database that are marked as leased with the - * specified lease ID. - * - * @param conn The {@link Connection} to use. - * @param leaseId The lease ID of the follow-up tasks to retrieve. - * @return The {@link List} of {@link ScheduledTask} instances describing - * the leased follow-up tasks. - * @throws SQLException If a JDBC failure occurs. 
- */ - protected List getLeasedFollowUpTasks(Connection conn, - String leaseId) - throws SQLException - { - PreparedStatement ps = null; - ResultSet rs = null; - try { - // set the leased rows - ps = conn.prepareStatement( - "SELECT " - + "task_id, expire_lease_at, multiplicity, json_text, created_on " - + "FROM sz_follow_up_tasks " - + "WHERE lease_id = ?"); - - ps.setString(1, leaseId); - - int fetchCount = this.getFollowUpFetchCount(); - List result = new ArrayList<>(fetchCount); - rs = ps.executeQuery(); - while (rs.next()) { - long taskId = rs.getLong(1); - Timestamp expTime = rs.getTimestamp(2, UTC_CALENDAR); - int multiplicity = rs.getInt(3); - String jsonText = rs.getString(4); - Timestamp createdOn = rs.getTimestamp(5, UTC_CALENDAR); - - String followUpId = taskId + ":" + leaseId; - long now = System.currentTimeMillis(); - long elapsedSinceCreation = now - createdOn.getTime(); - - ScheduledTask task = new ScheduledTask(jsonText, - followUpId, - multiplicity, - expTime.getTime(), - elapsedSinceCreation); - - result.add(task); - } - - // return the result list - return result; - - } finally { - rs = close(rs); - ps = close(ps); + + /** + * Creates a virtually unique lease ID. + * + * @return A new lease ID to use. + */ + protected String generateLeaseId() { + long pid = ProcessHandle.current().pid(); + StringBuilder sb = new StringBuilder(); + sb.append(pid).append("|").append(Instant.now().toString()).append("|"); + sb.append(TextUtilities.randomAlphanumericText(50)); + return sb.toString(); } - } - - /** - * Creates a virtually unique lease ID. - * - * @return A new lease ID to use. 
- */ - protected String generateLeaseId() { - long pid = ProcessHandle.current().pid(); - StringBuilder sb = new StringBuilder(); - sb.append(pid).append("|").append(Instant.now().toString()).append("|"); - sb.append(TextUtilities.randomAlphanumericText(50)); - return sb.toString(); - } - - /** - * Implemented to renew the lease on the specified tasks as well as any others - * that were dequeued with the same lease. - * - * {@inheritDoc} - */ - protected synchronized void renewFollowUpTasks(List tasks) - throws ServiceExecutionException - { - Connection conn = null; - boolean success = false; - try { - conn = this.getConnection(); - - Set leaseIdSet = new LinkedHashSet<>(); - for (ScheduledTask task : tasks) { - String followUpId = task.getFollowUpId(); - - int index = followUpId.indexOf(":"); - String leaseId = followUpId.substring(index + 1); - - leaseIdSet.add(leaseId); - } - - long now = System.currentTimeMillis(); - long leaseExpire = now + (2 * this.getFollowUpTimeout()); - Timestamp expireTime = new Timestamp(leaseExpire); - - int updateCount = this.updateLeaseExpiration(conn, - expireTime, - leaseIdSet); - - if (updateCount != tasks.size()) { - logWarning("WARNING: Renewed lease on " + updateCount - + " follow-up tasks when expected to update " - + tasks.size() + " follow-up tasks: " - + leaseIdSet); - } - - // commit the change - conn.commit(); - success = true; - - } catch (SQLException e) { - throw new ServiceExecutionException( - "Failed to enqueue follow-up task", e); - - } finally { - if (!success) { - rollback(conn); - } - conn = close(conn); + + /** + * Implemented to renew the lease on the specified tasks as well as any others + * that were dequeued with the same lease. 
+ * + * {@inheritDoc} + */ + protected synchronized void renewFollowUpTasks(List tasks) throws ServiceExecutionException { + Connection conn = null; + boolean success = false; + try { + conn = this.getConnection(); + + Set leaseIdSet = new LinkedHashSet<>(); + for (ScheduledTask task : tasks) { + String followUpId = task.getFollowUpId(); + + int index = followUpId.indexOf(":"); + String leaseId = followUpId.substring(index + 1); + + leaseIdSet.add(leaseId); + } + + long now = System.currentTimeMillis(); + long leaseExpire = now + (2 * this.getFollowUpTimeout()); + Timestamp expireTime = new Timestamp(leaseExpire); + + int updateCount = this.updateLeaseExpiration(conn, expireTime, leaseIdSet); + + if (updateCount != tasks.size()) { + logWarning("WARNING: Renewed lease on " + updateCount + " follow-up tasks when expected to update " + + tasks.size() + " follow-up tasks: " + leaseIdSet); + } + + // commit the change + conn.commit(); + success = true; + + } catch (SQLException e) { + throw new ServiceExecutionException("Failed to enqueue follow-up task", e); + + } finally { + if (!success) { + rollback(conn); + } + conn = close(conn); + } } - } - - /** - * Updates the expiration time on the follow-up tasks with lease ID's in the - * specified {@link Set} to the specified expiration time using the specified - * {@link Connection}. - * - * @param conn The {@link Connection} to use. - * @param expireTime The new expiration time as a {@link Timestamp}. - * @param leaseIdSet The {@link Set} of {@link String} lease ID's. - * @return The number of follow-up tasks updated. - * @throws SQLException If a JDBC failure occurs. 
- */ - protected int updateLeaseExpiration(Connection conn, - Timestamp expireTime, - Set leaseIdSet) - throws SQLException - { - DatabaseType dbType = this.getDatabaseType(); - - PreparedStatement ps = null; - try { - int count = leaseIdSet.size(); - - // build the SQL - StringBuilder sb = new StringBuilder( - "UPDATE sz_follow_up_tasks " - + "SET expire_lease_at = " + dbType.getTimestampBindingSQL() + " " - + "WHERE lease_id IN ("); - String prefix = ""; - for (int index = 0; index < count; index++) { - sb.append(prefix).append("?"); - prefix = ", "; - } - sb.append(")"); - - // prepare the statement - ps = conn.prepareStatement(sb.toString()); - - // bind the timestamp for the new expire time - dbType.setTimestamp(ps, 1, expireTime); - - // bind the lease ID's - int index = 2; - for (String leaseId : leaseIdSet) { - ps.setString(index++, leaseId); - } - - // execute the update and return the row count - return ps.executeUpdate(); - - } finally { - ps = close(ps); + + /** + * Updates the expiration time on the follow-up tasks with lease ID's in the + * specified {@link Set} to the specified expiration time using the specified + * {@link Connection}. + * + * @param conn The {@link Connection} to use. + * @param expireTime The new expiration time as a {@link Timestamp}. + * @param leaseIdSet The {@link Set} of {@link String} lease ID's. + * @return The number of follow-up tasks updated. + * @throws SQLException If a JDBC failure occurs. 
+ */ + protected int updateLeaseExpiration(Connection conn, Timestamp expireTime, Set leaseIdSet) throws SQLException { + DatabaseType dbType = this.getDatabaseType(); + + PreparedStatement ps = null; + try { + int count = leaseIdSet.size(); + + // build the SQL + StringBuilder sb = new StringBuilder("UPDATE sz_follow_up_tasks " + "SET expire_lease_at = " + + dbType.getTimestampBindingSQL() + " " + "WHERE lease_id IN ("); + String prefix = ""; + for (int index = 0; index < count; index++) { + sb.append(prefix).append("?"); + prefix = ", "; + } + sb.append(")"); + + // prepare the statement + ps = conn.prepareStatement(sb.toString()); + + // bind the timestamp for the new expire time + dbType.setTimestamp(ps, 1, expireTime); + + // bind the lease ID's + int index = 2; + for (String leaseId : leaseIdSet) { + ps.setString(index++, leaseId); + } + + // execute the update and return the row count + return ps.executeUpdate(); + + } finally { + ps = close(ps); + } } - } - - /** - * Implemented to delete the specified follow-up task from persistent storage. - * - * {@inheritDoc} - */ - protected synchronized void completeFollowUpTask(ScheduledTask task) - throws ServiceExecutionException - { - Connection conn = null; - boolean success = false; - try { - conn = this.getConnection(); - - boolean deleted = this.deleteFollowUpTask(conn, task); - - if (!deleted) { - logWarning("WARNING: Follow-up task was already completed: ", - task); - } - - // commit the transaction - conn.commit(); - success = true; - - } catch (SQLException e) { - throw new ServiceExecutionException( - "Failed to delete completed follow-up task", e); - - } finally { - if (!success) { - rollback(conn); - } - conn = close(conn); + + /** + * Implemented to delete the specified follow-up task from persistent storage. 
+ * + * {@inheritDoc} + */ + protected synchronized void completeFollowUpTask(ScheduledTask task) throws ServiceExecutionException { + Connection conn = null; + boolean success = false; + try { + conn = this.getConnection(); + + boolean deleted = this.deleteFollowUpTask(conn, task); + + if (!deleted) { + logWarning("WARNING: Follow-up task was already completed: ", task); + } + + // commit the transaction + conn.commit(); + success = true; + + } catch (SQLException e) { + throw new ServiceExecutionException("Failed to delete completed follow-up task", e); + + } finally { + if (!success) { + rollback(conn); + } + conn = close(conn); + } } - } - - /** - * Deletes a follow-up task (typically once it hqs been completed). This is - * called from the default {@link #completeFollowUpTask(ScheduledTask)} - * implementation. - * - * @param conn The {@link Connection} to use. - * @param task The {@link ScheduledTask} describing the task to delete. - * @return true if a follow-up was deleted and false - * if not (usually because it was already deleted). - * @throws SQLException If a JDBC failure occurs. - */ - protected boolean deleteFollowUpTask(Connection conn, - ScheduledTask task) - throws SQLException - { - PreparedStatement ps = null; - try { - ps = conn.prepareStatement( - "DELETE FROM sz_follow_up_tasks WHERE task_id = ?"); - - String followUpId = task.getFollowUpId(); - int index = followUpId.indexOf(":"); - long taskId = Long.parseLong(followUpId.substring(0, index)); - - ps.setLong(1, taskId); - - int rowCount = ps.executeUpdate(); - - if (rowCount > 1) { - throw new SQLException( - "Multiple follow-up rows deleted when one was expected: " - + rowCount); - } - - return (rowCount == 1); - - } finally { - ps = close(ps); + + /** + * Deletes a follow-up task (typically once it hqs been completed). This is + * called from the default {@link #completeFollowUpTask(ScheduledTask)} + * implementation. + * + * @param conn The {@link Connection} to use. 
+ * @param task The {@link ScheduledTask} describing the task to delete. + * @return true if a follow-up was deleted and false + * if not (usually because it was already deleted). + * @throws SQLException If a JDBC failure occurs. + */ + protected boolean deleteFollowUpTask(Connection conn, ScheduledTask task) throws SQLException { + PreparedStatement ps = null; + try { + ps = conn.prepareStatement("DELETE FROM sz_follow_up_tasks WHERE task_id = ?"); + + String followUpId = task.getFollowUpId(); + int index = followUpId.indexOf(":"); + long taskId = Long.parseLong(followUpId.substring(0, index)); + + ps.setLong(1, taskId); + + int rowCount = ps.executeUpdate(); + + if (rowCount > 1) { + throw new SQLException("Multiple follow-up rows deleted when one was expected: " + rowCount); + } + + return (rowCount == 1); + + } finally { + ps = close(ps); + } } - } - - /** - * Gets the total number of follow-up tasks that were dequeued and expired - * before being handled. - * - * @return The total number of follow-up tasks that were dequeued and expired - * before being handled. - */ - public long getTotalExpiredFollowUpTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.totalExpiredFollowUpTaskCount; + + /** + * Gets the total number of follow-up tasks that were dequeued and expired + * before being handled. + * + * @return The total number of follow-up tasks that were dequeued and expired + * before being handled. 
+ */ + public long getTotalExpiredFollowUpTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.totalExpiredFollowUpTaskCount; + } } - } } diff --git a/src/main/java/com/senzing/listener/service/scheduling/AbstractSchedulingService.java b/src/main/java/com/senzing/listener/service/scheduling/AbstractSchedulingService.java index 5b36fa8..5dfe0a3 100644 --- a/src/main/java/com/senzing/listener/service/scheduling/AbstractSchedulingService.java +++ b/src/main/java/com/senzing/listener/service/scheduling/AbstractSchedulingService.java @@ -28,3726 +28,3610 @@ * Provides an abstract base class for implementing {@link SchedulingService}. */ public abstract class AbstractSchedulingService implements SchedulingService { - /** - * The number of milliseconds to wait for the task handler to be ready. - */ - private static final long READY_TIMEOUT = 2000L; - - /** - * Constant for nanosecond/millisecond conversion. - */ - private static final long ONE_MILLION = 1000000L; - - /** - * The default concurrency. The default is to serialize task handling in - * a single thread. - */ - public static final int DEFAULT_CONCURRENCY = 1; - - /** - * The default number of milliseconds for the {@link #POSTPONED_TIMEOUT_KEY} - * initialization parameter. - */ - public static final long DEFAULT_POSTPONED_TIMEOUT = 50L; - - /** - * The default number of milliseconds for the {@link #STANDARD_TIMEOUT_KEY} - * initialization parameter if not otherwise specified. - */ - public static final long DEFAULT_STANDARD_TIMEOUT = 1500L; - - /** - * The default number of milliseconds for the {@link #FOLLOW_UP_DELAY_KEY} - * initialization parameter if not otherwise specified. - */ - public static final long DEFAULT_FOLLOW_UP_DELAY = 10000L; - - /** - * The default maximum number of milliseconds for the {@link - * #FOLLOW_UP_TIMEOUT_KEY} initialization parameter if not otherwise - * specified. 
- */ - public static final long DEFAULT_FOLLOW_UP_TIMEOUT = 60000L; - - /** - * The default number of follow-up tasks to fetch from persistent storage at - * a time. - */ - public static final int DEFAULT_FOLLOW_UP_FETCH = 100; - - /** - * The config property key for configuring the concurrency. - */ - public static final String CONCURRENCY_KEY = "concurrency"; - - /** - * The initialization parameter to specify the number of milliseconds to - * sleep between checks on the locks required for tasks that have been - * postponed due to contention. If not configured then the value is set to - * {@link #DEFAULT_POSTPONED_TIMEOUT}. If the value is specified it should - * be non-negative. - */ - public static final String POSTPONED_TIMEOUT_KEY = "postponedTimeout"; - - /** - * The initialization parameter to specify the number of milliseconds to - * sleep between checking to see if task handling should cease. This - * timeout is used when there are no postponed tasks due to contention. - * If not configured then the value is set to {@link - * #DEFAULT_STANDARD_TIMEOUT}. If the value is specified it should be - * non-negative. - */ - public static final String STANDARD_TIMEOUT_KEY = "standardTimeout"; - - /** - * The initialization parameter to specify the number of milliseconds to - * delay before attempting to execute a follow-up task. This delay is - * used to give the opportunity to receive duplicate follow-up tasks that - * can be collapsed. Whenever a duplicate is collapsed, the delay timer - * starts over unless the {@linkplain #FOLLOW_UP_TIMEOUT_KEY maximum - * follow-up deferral time} has been reached. If not configured then - * the value is set to {@link #DEFAULT_FOLLOW_UP_DELAY}. If the value is - * specified it should be non-negative. - */ - public static final String FOLLOW_UP_DELAY_KEY = "followUpDelay"; - - /** - * The initialization parameter to specify the maximum number of milliseconds - * to defer a follow-up task. 
Once a follow-up task has been deferred this - * number of milliseconds it will no longer be purposely delayed to wait for - * additional duplicates to be scheduled and collapsed. This is also the - * amount of time used to cache a follow-up task from persistent storage - * before considering the cached version expired and make it available again - * from persistent storage. If not configured then the value is set to {@link - * #DEFAULT_FOLLOW_UP_TIMEOUT}. If the value is specified it should be - * non-negative and must be greater than the delay time specified by - * {@link #FOLLOW_UP_DELAY_KEY}. - */ - public static final String FOLLOW_UP_TIMEOUT_KEY = "followUpTimeout"; - - /** - * The initialization parameter to specify the maximum number of follow-up - * tasks to retrieve from persistent storage at a time to refill the - * in-memory cache. The retrieved tasks should not be returned from - * persistent storage again until after the {@linkplain #FOLLOW_UP_TIMEOUT_KEY - * follow-up timeout} has elapsed and after it has elapsed, the in-memory - * cache should be considered expired. If not configured then the value is - * to {@link #DEFAULT_FOLLOW_UP_FETCH}. If the value is specified it should - * be a positive number. - */ - public static final String FOLLOW_UP_FETCH_KEY = "followUpFetch"; - - /** - * The initialization parameter used by the default implementation of - * {@link #initLockingService(JsonObject)} to specify the Java class name - * of the {@link LockingService} to use. If the default implementation of - * {@link #initLockingService(JsonObject)} is overridden, then this key may - * have no effect in the derived implementation. - */ - public static final String LOCKING_SERVICE_CLASS_KEY = "lockingService"; - - /** - * The default value for the {@link #LOCKING_SERVICE_CLASS_KEY} if the value - * is not specified. This is the class name for {@link - * ProcessScopeLockingService}. 
- */ - public static final String DEFAULT_LOCKING_SERVICE_CLASS_NAME = ProcessScopeLockingService.class.getName(); - - /** - * The initialization parameter referencing a JSON object or {@link String} - * that represents the configuration for the {@link LockingService} instance - * created by the default implementation of {@link - * #initLockingService(JsonObject)} using the {@link - * #LOCKING_SERVICE_CLASS_KEY} init parameter. If the default implementation - * of {@link #initLockingService(JsonObject)} is overridden, then this key may - * have no effect in the derived implementation. - */ - public static final String LOCKING_SERVICE_CONFIG_KEY = "lockingServiceConfig"; - - /** - * Millisecond units constant for {@link Stat} instances. - */ - private static final String MILLISECOND_UNITS = "ms"; - - /** - * Thread units constant for {@link Stat} instances. - */ - private static final String THREAD_UNITS = "threads"; - - /** - * Task units constant for {@link Stat} instances. - */ - private static final String TASK_UNITS = "tasks"; - - /** - * Task group units constant for {@link Stat} instances. - */ - private static final String TASK_GROUP_UNITS = "task groups"; - - /** - * Call units constant for {@link Stat} instances. - */ - private static final String CALL_UNITS = "calls"; - - /** - * Tasks per call units constant for {@link Stat} instances. - */ - private static final String TASKS_PER_CALL_UNITS = "tasks per call"; - - /** - * Enumerates the various task types. - */ - private enum TaskType { /** - * Pending tasks that have never been handled. + * The number of milliseconds to wait for the task handler to be ready. */ - PENDING, + private static final long READY_TIMEOUT = 2000L; /** - * Postponed tasks that were previously attempted but postponed. + * Constant for nanosecond/millisecond conversion. */ - POSTPONED, + private static final long ONE_MILLION = 1000000L; /** - * Follow-up tasks that were scheduled in response to previous tasks. 
+ * The default concurrency. The default is to serialize task handling in a + * single thread. */ - FOLLOW_UP; - } + public static final int DEFAULT_CONCURRENCY = 1; - /** - * The various keys used for timing operations. - */ - public enum Stat implements Statistic { /** - * The number of worker threads used to asynchronously handle the tasks. + * The default number of milliseconds for the {@link #POSTPONED_TIMEOUT_KEY} + * initialization parameter. */ - concurrency(THREAD_UNITS), + public static final long DEFAULT_POSTPONED_TIMEOUT = 50L; /** - * The timeout to use when waiting for new tasks to show up when there - * are no postponed tasks. When there are postponed tasks then - * {@link #postponedTimeout} is used. + * The default number of milliseconds for the {@link #STANDARD_TIMEOUT_KEY} + * initialization parameter if not otherwise specified. */ - standardTimeout(MILLISECOND_UNITS), + public static final long DEFAULT_STANDARD_TIMEOUT = 1500L; /** - * The number of milliseconds to sleep between checks on the locks required - * for tasks that have been postponed due to contention. + * The default number of milliseconds for the {@link #FOLLOW_UP_DELAY_KEY} + * initialization parameter if not otherwise specified. */ - postponedTimeout(MILLISECOND_UNITS), + public static final long DEFAULT_FOLLOW_UP_DELAY = 10000L; /** - * The number of milliseconds to delay a follow-up task initially (to allow - * duplicates to be collapsed with it) and after each time a duplicate is - * found. The total deferral of the follow-up task is governed by the - * {@link #followUpTimeout} value. + * The default maximum number of milliseconds for the + * {@link #FOLLOW_UP_TIMEOUT_KEY} initialization parameter if not otherwise + * specified. */ - followUpDelay(MILLISECOND_UNITS), + public static final long DEFAULT_FOLLOW_UP_TIMEOUT = 60000L; /** - * The maximum number of milliseconds to defer a follow-up task while - * waiting for duplicate tasks to be collapsed with it. 
+ * The default number of follow-up tasks to fetch from persistent storage at a + * time. */ - followUpTimeout(MILLISECOND_UNITS), + public static final int DEFAULT_FOLLOW_UP_FETCH = 100; /** - * The average number of milliseconds from when a non-follow-up task is - * scheduled until it has been handled. + * The config property key for configuring the concurrency. */ - averageTaskTime(MILLISECOND_UNITS), + public static final String CONCURRENCY_KEY = "concurrency"; /** - * The average number of milliseconds from when a task group has its first - * task scheduled until all of its tasks have been handled. + * The initialization parameter to specify the number of milliseconds to sleep + * between checks on the locks required for tasks that have been postponed due + * to contention. If not configured then the value is set to + * {@link #DEFAULT_POSTPONED_TIMEOUT}. If the value is specified it should be + * non-negative. */ - averageTaskGroupTime(MILLISECOND_UNITS), + public static final String POSTPONED_TIMEOUT_KEY = "postponedTimeout"; /** - * The longest amount of time (in milliseconds) for when a non-followup - * task was scheduled until it was completely processed. + * The initialization parameter to specify the number of milliseconds to sleep + * between checking to see if task handling should cease. This timeout is used + * when there are no postponed tasks due to contention. If not configured then + * the value is set to {@link #DEFAULT_STANDARD_TIMEOUT}. If the value is + * specified it should be non-negative. */ - longestTaskTime(MILLISECOND_UNITS), + public static final String STANDARD_TIMEOUT_KEY = "standardTimeout"; /** - * The longest amount of time (in milliseconds) for when a non-followup - * task was scheduled until it was completely processed. + * The initialization parameter to specify the number of milliseconds to delay + * before attempting to execute a follow-up task. 
This delay is used to give the + * opportunity to receive duplicate follow-up tasks that can be collapsed. + * Whenever a duplicate is collapsed, the delay timer starts over unless the + * {@linkplain #FOLLOW_UP_TIMEOUT_KEY maximum follow-up deferral time} has been + * reached. If not configured then the value is set to + * {@link #DEFAULT_FOLLOW_UP_DELAY}. If the value is specified it should be + * non-negative. */ - longestTaskGroupTime(MILLISECOND_UNITS), + public static final String FOLLOW_UP_DELAY_KEY = "followUpDelay"; /** - * The number of non-follow-up tasks that have made the round trip from - * being scheduled to the point where they are completely handled. + * The initialization parameter to specify the maximum number of milliseconds to + * defer a follow-up task. Once a follow-up task has been deferred this number + * of milliseconds it will no longer be purposely delayed to wait for additional + * duplicates to be scheduled and collapsed. This is also the amount of time + * used to cache a follow-up task from persistent storage before considering the + * cached version expired and make it available again from persistent storage. + * If not configured then the value is set to + * {@link #DEFAULT_FOLLOW_UP_TIMEOUT}. If the value is specified it should be + * non-negative and must be greater than the delay time specified by + * {@link #FOLLOW_UP_DELAY_KEY}. */ - taskCompleteCount(TASK_UNITS), + public static final String FOLLOW_UP_TIMEOUT_KEY = "followUpTimeout"; /** - * The number of non-follow-up tasks that have been completed successfully. + * The initialization parameter to specify the maximum number of follow-up tasks + * to retrieve from persistent storage at a time to refill the in-memory cache. + * The retrieved tasks should not be returned from persistent storage again + * until after the {@linkplain #FOLLOW_UP_TIMEOUT_KEY follow-up timeout} has + * elapsed and after it has elapsed, the in-memory cache should be considered + * expired. 
If not configured then the value is to + * {@link #DEFAULT_FOLLOW_UP_FETCH}. If the value is specified it should be a + * positive number. */ - taskSuccessCount(TASK_UNITS), + public static final String FOLLOW_UP_FETCH_KEY = "followUpFetch"; /** - * The number of non-follow-up tasks that have been completed with a - * failure. + * The initialization parameter used by the default implementation of + * {@link #initLockingService(JsonObject)} to specify the Java class name of the + * {@link LockingService} to use. If the default implementation of + * {@link #initLockingService(JsonObject)} is overridden, then this key may have + * no effect in the derived implementation. */ - taskFailureCount(TASK_UNITS), + public static final String LOCKING_SERVICE_CLASS_KEY = "lockingService"; /** - * The number of non-follow-up tasks that have been aborted. + * The default value for the {@link #LOCKING_SERVICE_CLASS_KEY} if the value is + * not specified. This is the class name for {@link ProcessScopeLockingService}. */ - taskAbortCount(TASK_UNITS), + public static final String DEFAULT_LOCKING_SERVICE_CLASS_NAME = ProcessScopeLockingService.class.getName(); /** - * The number of follow-up tasks that have made the round trip from - * being scheduled to the point where they are completely handled. + * The initialization parameter referencing a JSON object or {@link String} that + * represents the configuration for the {@link LockingService} instance created + * by the default implementation of {@link #initLockingService(JsonObject)} + * using the {@link #LOCKING_SERVICE_CLASS_KEY} init parameter. If the default + * implementation of {@link #initLockingService(JsonObject)} is overridden, then + * this key may have no effect in the derived implementation. */ - followUpCompleteCount(TASK_UNITS), + public static final String LOCKING_SERVICE_CONFIG_KEY = "lockingServiceConfig"; /** - * The number of follow-up tasks that have been completed successfully. 
+ * Millisecond units constant for {@link Stat} instances. */ - followUpSuccessCount(TASK_UNITS), + private static final String MILLISECOND_UNITS = "ms"; /** - * The number of follow-up tasks that have been completed with a failure. + * Thread units constant for {@link Stat} instances. */ - followUpFailureCount(TASK_UNITS), + private static final String THREAD_UNITS = "threads"; /** - * The average number of milliseconds for a task to be handled by the - * {@link TaskHandler} via {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)}. + * Task units constant for {@link Stat} instances. */ - averageHandleTask(MILLISECOND_UNITS), + private static final String TASK_UNITS = "tasks"; /** - * The number of times the {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)} method has been called - * to handle a task (follow-up or not). + * Task group units constant for {@link Stat} instances. */ - handleTaskCount(CALL_UNITS), + private static final String TASK_GROUP_UNITS = "task groups"; /** - * The number of times that the {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)} has been called - * successfully (i.e.: without any exceptions) to handle a task (follow-up - * or not). + * Call units constant for {@link Stat} instances. */ - handleTaskSuccessCount(CALL_UNITS), + private static final String CALL_UNITS = "calls"; /** - * The number of times that the {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)} has been called - * unsuccessfully (i.e.: with an exceptions being thrown) to handle a task - * (follow-up or not). + * Tasks per call units constant for {@link Stat} instances. */ - handleTaskFailureCount(CALL_UNITS), + private static final String TASKS_PER_CALL_UNITS = "tasks per call"; /** - * Gets the ratio of the number of times {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)} has been called for - * follow-up tasks to number of times it has been called for all - * tasks that have been handled. 
+ * Enumerates the various task types. */ - followUpHandleTaskRatio(null), + private enum TaskType { + /** + * Pending tasks that have never been handled. + */ + PENDING, + + /** + * Postponed tasks that were previously attempted but postponed. + */ + POSTPONED, + + /** + * Follow-up tasks that were scheduled in response to previous tasks. + */ + FOLLOW_UP; + } /** - * The number of non-followup tasks that have made the round trip from being - * scheduled to the point where they are completely handled. Some - * messages may make the round trip more than once if a failure occurs in - * processing part or all of the message. + * The various keys used for timing operations. */ - taskGroupCompleteCount(TASK_GROUP_UNITS), + public enum Stat implements Statistic { + /** + * The number of worker threads used to asynchronously handle the tasks. + */ + concurrency(THREAD_UNITS), + + /** + * The timeout to use when waiting for new tasks to show up when there are no + * postponed tasks. When there are postponed tasks then + * {@link #postponedTimeout} is used. + */ + standardTimeout(MILLISECOND_UNITS), + + /** + * The number of milliseconds to sleep between checks on the locks required for + * tasks that have been postponed due to contention. + */ + postponedTimeout(MILLISECOND_UNITS), + + /** + * The number of milliseconds to delay a follow-up task initially (to allow + * duplicates to be collapsed with it) and after each time a duplicate is found. + * The total deferral of the follow-up task is governed by the + * {@link #followUpTimeout} value. + */ + followUpDelay(MILLISECOND_UNITS), + + /** + * The maximum number of milliseconds to defer a follow-up task while waiting + * for duplicate tasks to be collapsed with it. + */ + followUpTimeout(MILLISECOND_UNITS), + + /** + * The average number of milliseconds from when a non-follow-up task is + * scheduled until it has been handled. 
+ */ + averageTaskTime(MILLISECOND_UNITS), + + /** + * The average number of milliseconds from when a task group has its first task + * scheduled until all of its tasks have been handled. + */ + averageTaskGroupTime(MILLISECOND_UNITS), + + /** + * The longest amount of time (in milliseconds) for when a non-followup task was + * scheduled until it was completely processed. + */ + longestTaskTime(MILLISECOND_UNITS), + + /** + * The longest amount of time (in milliseconds) for when a non-followup task was + * scheduled until it was completely processed. + */ + longestTaskGroupTime(MILLISECOND_UNITS), + + /** + * The number of non-follow-up tasks that have made the round trip from being + * scheduled to the point where they are completely handled. + */ + taskCompleteCount(TASK_UNITS), + + /** + * The number of non-follow-up tasks that have been completed successfully. + */ + taskSuccessCount(TASK_UNITS), + + /** + * The number of non-follow-up tasks that have been completed with a failure. + */ + taskFailureCount(TASK_UNITS), + + /** + * The number of non-follow-up tasks that have been aborted. + */ + taskAbortCount(TASK_UNITS), + + /** + * The number of follow-up tasks that have made the round trip from being + * scheduled to the point where they are completely handled. + */ + followUpCompleteCount(TASK_UNITS), + + /** + * The number of follow-up tasks that have been completed successfully. + */ + followUpSuccessCount(TASK_UNITS), + + /** + * The number of follow-up tasks that have been completed with a failure. + */ + followUpFailureCount(TASK_UNITS), + + /** + * The average number of milliseconds for a task to be handled by the + * {@link TaskHandler} via + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)}. + */ + averageHandleTask(MILLISECOND_UNITS), + + /** + * The number of times the + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)} method has been + * called to handle a task (follow-up or not). 
+ */ + handleTaskCount(CALL_UNITS), + + /** + * The number of times that the + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)} has been called + * successfully (i.e.: without any exceptions) to handle a task (follow-up or + * not). + */ + handleTaskSuccessCount(CALL_UNITS), + + /** + * The number of times that the + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)} has been called + * unsuccessfully (i.e.: with an exceptions being thrown) to handle a task + * (follow-up or not). + */ + handleTaskFailureCount(CALL_UNITS), + + /** + * Gets the ratio of the number of times + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)} has been called for + * follow-up tasks to number of times it has been called for all tasks + * that have been handled. + */ + followUpHandleTaskRatio(null), + + /** + * The number of non-followup tasks that have made the round trip from being + * scheduled to the point where they are completely handled. Some messages may + * make the round trip more than once if a failure occurs in processing part or + * all of the message. + */ + taskGroupCompleteCount(TASK_GROUP_UNITS), + + /** + * The number of task groups that had all of their tasks handled successfully + * without any exceptions. + */ + taskGroupSuccessCount(TASK_GROUP_UNITS), + + /** + * The number of task groups that have completed but have had at least one + * failure with one of the associated tasks. + */ + taskGroupFailureCount(TASK_GROUP_UNITS), + + /** + * The average compression ratio of duplicate non-follow-up tasks. This is the + * number of total non-follow-up tasks handled divided by the number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} was called to + * handle those tasks. + */ + averageCompression(TASKS_PER_CALL_UNITS), + + /** + * The greatest compression ratio achieved by a single non-follow-up task. 
This + * the greatest number of duplicate non-follow-up tasks that were collapsed into + * a single task handling call to + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)}. + */ + greatestCompression(TASKS_PER_CALL_UNITS), + + /** + * The average compression ratio of duplicate follow-up tasks. This is the + * number of total follow-up tasks handled divided by the number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} was called to + * handle those tasks. + */ + averageFollowUpCompression(TASKS_PER_CALL_UNITS), + + /** + * The greatest compression ratio achieved by a single follow-up task. This the + * greatest number of duplicate follow-up tasks that were collapsed into a + * single task handling call to + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)}. + */ + greatestFollowUpCompression(TASKS_PER_CALL_UNITS), + + /** + * The average number of tasks in each task group. This only considers + * non-follow-up tasks. + */ + averageTaskGroupSize(TASK_UNITS), + + /** + * The greatest number of tasks encountered in a completed task group. This only + * considers non-follow-up tasks. + */ + greatestTaskGroupSize(TASK_UNITS), + + /** + * The ratio of cumulative {@link TaskHandler} handling time across all threads + * to actual active handling time. + */ + parallelism(null), + + /** + * The ratio of the number of times the {@link #dequeueTask()} function is + * called and a task is ready to be returned without waiting. + */ + dequeueHitRatio(null), + + /** + * The greatest number of tasks to be postponed at any given time due to + * contention on the resources being acted upon. + */ + greatestPostponedCount(TASK_UNITS), + + /** + * The cumulative time spent (in milliseconds) in the {@link #handleTasks()} + * function. + */ + taskHandling(MILLISECOND_UNITS), + + /** + * The cumulative time spent (in milliseconds) actively handling tasks. This + * excludes time waiting for messages to arrive. 
+ */ + activelyHandling(MILLISECOND_UNITS), + + /** + * The cumulative time spent (in milliseconds) waiting for tasks to be + * scheduled. + */ + waitingForTasks(MILLISECOND_UNITS), + + /** + * The cumulative time spent (in milliseconds) not handling tasks while waiting + * for locks to be released for postponed tasks. + */ + waitingOnPostponed(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) between handing a task off to a worker for + * processing and obtaining the next task to be processed. + */ + betweenTasks(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) calling {@link #dequeueTask()} function to + * dequeue a task from the internal queue. This includes time waiting for the + * first task to arrive or the next task to arrive after the previous task has + * been handled. + */ + dequeue(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) waiting to obtain the synchronized lock on + * the scheduling service in order to call the {@link #dequeueTask()} function. + */ + dequeueBlocking(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) in the "wait loop" of {@link #dequeueTask()} + * waiting for a task to become available for processing. + */ + dequeueTaskWaitLoop(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) in the synchronization wait of + * {@link #dequeueTask()} waiting for a task to become available for processing. + * This should be the majority of the time spent in + * {@link #dequeueTaskWaitLoop}, but isolates the non-busy sleeping time + * awaiting notification of task arrival. + */ + dequeueTaskWait(MILLISECOND_UNITS), + + /** + * The time spent (in milliseconds) checking to see if a task on the pending + * queue is locked and should be postponed for later processing. + */ + dequeueCheckLocked(MILLISECOND_UNITS), + + /** + * The number of milliseconds spent calling + * {@link #init(JsonObject, TaskHandler)}. 
+ */ + initialize(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent checking pending tasks to see if + * one is ready to be processed. + */ + checkPending(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent checking follow-up tasks to see + * if they are now ready to be processed. + */ + checkFollowUp(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent checking postponed tasks to see + * if they are now ready to be processed. + */ + checkPostponed(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent obtaining locks on the affected + * resources. + */ + obtainLocks(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent waiting for an available worker + * thread to handle a task that has been pulled from the pending queue. + */ + waitForWorker(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent calling + * {@link TaskHandler#handleTask(String,Map,int,Scheduler)}. + */ + handleTask(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent calling + * {@link ScheduledTask#succeeded()} or {@link ScheduledTask#failed(Exception)}. + */ + markComplete(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent calling + * {@link #completeFollowUpTask(ScheduledTask)}. + */ + completeFollowUp(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent releasing locks on affected + * resources. + */ + releaseLocks(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent calling + * {@link #postProcess(ScheduledTask)}. + */ + postProcess(MILLISECOND_UNITS), + + /** + * The cumulative number of milliseconds spent calling {@link #destroy()}. + */ + destroy("ms"); + + /** + * Constructs the statistic instance with the associated units. + * + * @param units The units that the statistic is measured in. 
+ */ + Stat(String units) { + this.units = units; + } + + /** + * The units for the statistic. + */ + private String units; + + /** + * Gets the unit of measure for this statistic. This is the unit that the + * {@link Number} value is measured in when calling + * {@link AbstractSchedulingService#getStatistics()}} + * + * @return The unit of measure for this statistic. + */ + public String getUnits() { + return this.units; + } + } /** - * The number of task groups that had all of their tasks handled - * successfully without any exceptions. + * The {@link TaskHandler} for handling the tasks. */ - taskGroupSuccessCount(TASK_GROUP_UNITS), + private TaskHandler taskHandler = null; /** - * The number of task groups that have completed but have had at least one - * failure with one of the associated tasks. + * The {@link LockingService} to use. */ - taskGroupFailureCount(TASK_GROUP_UNITS), + private LockingService lockingService = null; /** - * The average compression ratio of duplicate non-follow-up tasks. This is - * the number of total non-follow-up tasks handled divided by the number of - * times {@link TaskHandler#handleTask(String, Map, int, Scheduler)} was called - * to handle those tasks. + * The {@link State} for this instance. */ - averageCompression(TASKS_PER_CALL_UNITS), + private State state = UNINITIALIZED; /** - * The greatest compression ratio achieved by a single non-follow-up task. - * This the greatest number of duplicate non-follow-up tasks that were - * collapsed into a single task handling call to {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)}. + * The {@link List} of {@link TaskType} instances specifying the + * {@link TaskType} order. */ - greatestCompression(TASKS_PER_CALL_UNITS), + private List taskTypeOrder = null; /** - * The average compression ratio of duplicate follow-up tasks. 
This is - * the number of total follow-up tasks handled divided by the number of - * times {@link TaskHandler#handleTask(String, Map, int, Scheduler)} was called - * to handle those tasks. + * The current index into the {@link #taskTypeOrder} list. */ - averageFollowUpCompression(TASKS_PER_CALL_UNITS), + private int taskTypeIndex = 0; /** - * The greatest compression ratio achieved by a single follow-up task. - * This the greatest number of duplicate follow-up tasks that were - * collapsed into a single task handling call to {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)}. + * Flag indicating that {@link #handleTasks()} has been called and is currently + * running to prevent more than one call for this object process-wide. */ - greatestFollowUpCompression(TASKS_PER_CALL_UNITS), + private boolean handlingTasks = false; /** - * The average number of tasks in each task group. This only considers - * non-follow-up tasks. + * The concurrency for this instance. This is the maximum number of threads used + * to handle tasks. */ - averageTaskGroupSize(TASK_UNITS), + private int concurrency = DEFAULT_CONCURRENCY; /** - * The greatest number of tasks encountered in a completed task group. - * This only considers non-follow-up tasks. + * The {@link AsyncWorkerPool} returning {@link TaskResult} instances. */ - greatestTaskGroupSize(TASK_UNITS), + private AsyncWorkerPool workerPool = null; /** - * The ratio of cumulative {@link TaskHandler} handling time across - * all threads to actual active handling time. + * The number of milliseconds to sleep between checks on the locks required for + * tasks that have been postponed due to contention. This timeout is used when + * there are pending tasks that have been postponed due to contention. */ - parallelism(null), + private long postponedTimeout = DEFAULT_POSTPONED_TIMEOUT; /** - * The ratio of the number of times the {@link #dequeueTask()} function is - * called and a task is ready to be returned without waiting. 
+ * The number of milliseconds to sleep between checking to see if task handling + * should cease. This timeout is used when there are no postponed tasks due to + * contention. */ - dequeueHitRatio(null), + private long standardTimeout = DEFAULT_STANDARD_TIMEOUT; /** - * The greatest number of tasks to be postponed at any given time - * due to contention on the resources being acted upon. + * The number of milliseconds to delay before attempting to execute a follow-up + * task. This delay is used to give the opportunity to receive duplicate + * follow-up tasks that can be collapsed. Whenever a duplicate is collapsed, the + * delay timer starts over unless the {@link #followUpTimeout} has been reached. */ - greatestPostponedCount(TASK_UNITS), + private long followUpDelay = DEFAULT_FOLLOW_UP_DELAY; /** - * The cumulative time spent (in milliseconds) in the {@link - * #handleTasks()} function. + * The number of milliseconds to defer a follow-up task. Once a follow-up task + * has been deferred this number of milliseconds it will no longer be delayed to + * wait for additional duplicates to be scheduled and collapsed. */ - taskHandling(MILLISECOND_UNITS), + private long followUpTimeout = DEFAULT_FOLLOW_UP_TIMEOUT; /** - * The cumulative time spent (in milliseconds) actively handling tasks. - * This excludes time waiting for messages to arrive. + * The number of follow-up tasks to retrieve from persistent storage on a single + * retrieval. These retrieved follow-up tasks should be handled within the + * configured {@link #followUpTimeout}. */ - activelyHandling(MILLISECOND_UNITS), + private int followUpFetch = DEFAULT_FOLLOW_UP_FETCH; /** - * The cumulative time spent (in milliseconds) waiting for tasks to be - * scheduled. + * The {@link List} of pending tasks. */ - waitingForTasks(MILLISECOND_UNITS), + private List pendingTasks; /** - * The cumulative time spent (in milliseconds) not handling tasks - * while waiting for locks to be released for postponed tasks. 
+ * The {@link List} of delayed/postponed tasks. */ - waitingOnPostponed(MILLISECOND_UNITS), + private List postponedTasks; /** - * The time spent (in milliseconds) between handing a task off to a - * worker for processing and obtaining the next task to be processed. + * The {@link Map} of {@link String} signature keys to {@link ScheduledTask} + * instances. */ - betweenTasks(MILLISECOND_UNITS), + private Map taskCollapseLookup; /** - * The time spent (in milliseconds) calling {@link #dequeueTask()} function - * to dequeue a task from the internal queue. This includes time waiting - * for the first task to arrive or the next task to arrive after the - * previous task has been handled. + * The {@link List} of follow-up tasks. */ - dequeue(MILLISECOND_UNITS), + private List followUpTasks; /** - * The time spent (in milliseconds) waiting to obtain the synchronized lock - * on the scheduling service in order to call the {@link #dequeueTask()} - * function. + * The {@link List} of follow-up tasks that are currently being worked on. */ - dequeueBlocking(MILLISECOND_UNITS), + private IdentityHashMap inProgressFollowUpTasks; /** - * The time spent (in milliseconds) in the "wait loop" of - * {@link #dequeueTask()} waiting for a task to become available for - * processing. + * This is the scheduling thread that handles managing and dispatching tasks. */ - dequeueTaskWaitLoop(MILLISECOND_UNITS), + private Thread taskHandlingThread = null; /** - * The time spent (in milliseconds) in the synchronization wait of - * {@link #dequeueTask()} waiting for a task to become available for - * processing. This should be the majority of the time spent in - * {@link #dequeueTaskWaitLoop}, but isolates the non-busy sleeping - * time awaiting notification of task arrival. + * The nanosecond timestamp when the postponed tasks were last checked to see if + * one was ready. 
*/ - dequeueTaskWait(MILLISECOND_UNITS), + private long postponedNanoTime = -2 * (DEFAULT_POSTPONED_TIMEOUT * 1000000L); /** - * The time spent (in milliseconds) checking to see if a task on the - * pending queue is locked and should be postponed for later processing. + * The nanosecond timestamp when the follow-up tasks were last checked to see if + * one was ready. */ - dequeueCheckLocked(MILLISECOND_UNITS), + private long followUpNanoTime = -2 * (DEFAULT_FOLLOW_UP_DELAY * 1000000L); /** - * The number of milliseconds spent calling {@link - * #init(JsonObject, TaskHandler)}. + * The nano-second time at which to renew the lease on any dequeued follow-up + * tasks. */ - initialize(MILLISECOND_UNITS), + private long followUpRenewNanos = 0L; /** - * The cumulative number of milliseconds spent checking pending tasks - * to see if one is ready to be processed. + * The total of the number of round-trip milliseconds for all task groups. */ - checkPending(MILLISECOND_UNITS), + private long totalTaskGroupTime = 0L; /** - * The cumulative number of milliseconds spent checking follow-up tasks - * to see if they are now ready to be processed. + * The longest time it has taken a task to round-trip from scheduling to + * completion. */ - checkFollowUp(MILLISECOND_UNITS), + private long longestTaskGroupTime = -1L; /** - * The cumulative number of milliseconds spent checking postponed tasks - * to see if they are now ready to be processed. + * The total number of round-trip milliseconds for all tasks. */ - checkPostponed(MILLISECOND_UNITS), + private long totalTaskTime = 0L; /** - * The cumulative number of milliseconds spent obtaining locks on the - * affected resources. + * The longest number of milliseconds to round-trip from scheduling to + * completion of any given task. 
*/ - obtainLocks(MILLISECOND_UNITS), + private long longestTaskTime = -1L; /** - * The cumulative number of milliseconds spent waiting for an available - * worker thread to handle a task that has been pulled from the pending - * queue. + * The total number of milliseconds spent in the handling of all tasks. Keep in + * mind that this accounts for collapsed tasks so that the handling of collapsed + * tasks is only counted once. */ - waitForWorker(MILLISECOND_UNITS), + private long totalHandlingTime = 0L; /** - * The cumulative number of milliseconds spent calling {@link - * TaskHandler#handleTask(String,Map,int,Scheduler)}. + * The longest number of milliseconds spent handling a task. Keep in mind that + * this handling time may have completed multiple collapsed tasks. */ - handleTask(MILLISECOND_UNITS), + private long longestHandlingTime = -1L; /** - * The cumulative number of milliseconds spent calling {@link - * ScheduledTask#succeeded()} or {@link ScheduledTask#failed(Exception)}. + * The number of task groups that have been completed, successful or not. */ - markComplete(MILLISECOND_UNITS), + private long taskGroupCount = 0L; /** - * The cumulative number of milliseconds spent calling {@link - * #completeFollowUpTask(ScheduledTask)}. + * The number of tasks that have been handled whether successful or failed. This + * excluded aborted tasks. */ - completeFollowUp(MILLISECOND_UNITS), + private long taskCompleteCount = 0L; /** - * The cumulative number of milliseconds spent releasing locks on affected - * resources. + * The number of times the + * {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} + * method has been successfully called. */ - releaseLocks(MILLISECOND_UNITS), + private long taskSuccessCount = 0L; /** - * The cumulative number of milliseconds spent calling {@link - * #postProcess(ScheduledTask)}. 
+ * The number of times the + * {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} + * method has been called and thrown an exception. */ - postProcess(MILLISECOND_UNITS), + private long taskFailureCount = 0L; /** - * The cumulative number of milliseconds spent calling {@link - * #destroy()}. + * The number of times the + * {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} + * method has been successfully called. */ - destroy("ms"); + private long taskAbortCount = 0L; /** - * Constructs the statistic instance with the associated units. - * - * @param units The units that the statistic is measured in. - */ - Stat(String units) { - this.units = units; - } - - /** - * The units for the statistic. - */ - private String units; - - /** - * Gets the unit of measure for this statistic. This is the unit that - * the {@link Number} value is measured in when calling {@link - * AbstractSchedulingService#getStatistics()}} - * - * @return The unit of measure for this statistic. - */ - public String getUnits() { - return this.units; - } - } - - /** - * The {@link TaskHandler} for handling the tasks. - */ - private TaskHandler taskHandler = null; - - /** - * The {@link LockingService} to use. - */ - private LockingService lockingService = null; - - /** - * The {@link State} for this instance. - */ - private State state = UNINITIALIZED; - - /** - * The {@link List} of {@link TaskType} instances specifying the {@link - * TaskType} order. - */ - private List taskTypeOrder = null; - - /** - * The current index into the {@link #taskTypeOrder} list. - */ - private int taskTypeIndex = 0; - - /** - * Flag indicating that {@link #handleTasks()} has been called and is - * currently running to prevent more than one call for this object - * process-wide. - */ - private boolean handlingTasks = false; - - /** - * The concurrency for this instance. This is the maximum number of threads - * used to handle tasks. 
- */ - private int concurrency = DEFAULT_CONCURRENCY; - - /** - * The {@link AsyncWorkerPool} returning {@link TaskResult} instances. - */ - private AsyncWorkerPool workerPool = null; - - /** - * The number of milliseconds to sleep between checks on the locks required - * for tasks that have been postponed due to contention. This timeout is - * used when there are pending tasks that have been postponed due to - * contention. - */ - private long postponedTimeout = DEFAULT_POSTPONED_TIMEOUT; - - /** - * The number of milliseconds to sleep between checking to see if task - * handling should cease. This timeout is used when there are no postponed - * tasks due to contention. - */ - private long standardTimeout = DEFAULT_STANDARD_TIMEOUT; - - /** - * The number of milliseconds to delay before attempting to execute a - * follow-up task. This delay is used to give the opportunity to receive - * duplicate follow-up tasks that can be collapsed. Whenever a duplicate is - * collapsed, the delay timer starts over unless the {@link - * #followUpTimeout} has been reached. - */ - private long followUpDelay = DEFAULT_FOLLOW_UP_DELAY; - - /** - * The number of milliseconds to defer a follow-up task. Once a follow-up - * task has been deferred this number of milliseconds it will no longer be - * delayed to wait for additional duplicates to be scheduled and collapsed. - */ - private long followUpTimeout = DEFAULT_FOLLOW_UP_TIMEOUT; - - /** - * The number of follow-up tasks to retrieve from persistent storage on a - * single retrieval. These retrieved follow-up tasks should be handled - * within the configured {@link #followUpTimeout}. - */ - private int followUpFetch = DEFAULT_FOLLOW_UP_FETCH; - - /** - * The {@link List} of pending tasks. - */ - private List pendingTasks; - - /** - * The {@link List} of delayed/postponed tasks. - */ - private List postponedTasks; - - /** - * The {@link Map} of {@link String} signature keys to {@link ScheduledTask} - * instances. 
- */ - private Map taskCollapseLookup; - - /** - * The {@link List} of follow-up tasks. - */ - private List followUpTasks; - - /** - * The {@link List} of follow-up tasks that are currently being worked on. - */ - private IdentityHashMap inProgressFollowUpTasks; - - /** - * This is the scheduling thread that handles managing and dispatching tasks. - */ - private Thread taskHandlingThread = null; - - /** - * The nanosecond timestamp when the postponed tasks were last checked to - * see if one was ready. - */ - private long postponedNanoTime = -2 * (DEFAULT_POSTPONED_TIMEOUT * 1000000L); - - /** - * The nanosecond timestamp when the follow-up tasks were last checked to - * see if one was ready. - */ - private long followUpNanoTime = -2 * (DEFAULT_FOLLOW_UP_DELAY * 1000000L); - - /** - * The nano-second time at which to renew the lease on any dequeued follow-up - * tasks. - */ - private long followUpRenewNanos = 0L; - - /** - * The total of the number of round-trip milliseconds for all task groups. - */ - private long totalTaskGroupTime = 0L; - - /** - * The longest time it has taken a task to round-trip from scheduling to - * completion. - */ - private long longestTaskGroupTime = -1L; - - /** - * The total number of round-trip milliseconds for all tasks. - */ - private long totalTaskTime = 0L; - - /** - * The longest number of milliseconds to round-trip from scheduling to - * completion of any given task. - */ - private long longestTaskTime = -1L; - - /** - * The total number of milliseconds spent in the handling of all tasks. - * Keep in mind that this accounts for collapsed tasks so that the handling - * of collapsed tasks is only counted once. - */ - private long totalHandlingTime = 0L; - - /** - * The longest number of milliseconds spent handling a task. Keep in mind - * that this handling time may have completed multiple collapsed tasks. 
- */ - private long longestHandlingTime = -1L; - - /** - * The number of task groups that have been completed, successful or not. - */ - private long taskGroupCount = 0L; - - /** - * The number of tasks that have been handled whether successful or failed. - * This excluded aborted tasks. - */ - private long taskCompleteCount = 0L; - - /** - * The number of times the {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} - * method has been successfully called. - */ - private long taskSuccessCount = 0L; - - /** - * The number of times the {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} - * method has been called and thrown an exception. - */ - private long taskFailureCount = 0L; - - /** - * The number of times the {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} - * method has been successfully called. - */ - private long taskAbortCount = 0L; - - /** - * The number of follow-up tasks that have been handled whether successful or - * failed. This excluded aborted tasks. - */ - private long followUpCompleteCount = 0L; - - /** - * The number of times the {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} - * method has been successfully called. - */ - private long followUpSuccessCount = 0L; - - /** - * The number of times the {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} - * method has been called and thrown an exception. - */ - private long followUpFailureCount = 0L; - - /** - * The number of task groups that have successfully completed. - */ - private long groupSuccessCount = 0L; - - /** - * The greatest number of tasks encountered for a task group. - */ - private int greatestGroupSize = 0; - - /** - * The number of task groups that have completed with failures. - */ - private long groupFailureCount = 0L; - - /** - * The number of {@link ScheduledTask} instances handled. 
Each {@link - * ScheduledTask} may be backed by multiple duplicate actual {@link Task} - * instances. - */ - private long handleCount = 0L; - - /** - * The number of non-follow-up {@link ScheduledTask} instances handled. Each - * {@link ScheduledTask} may be backed by multiple duplicate actual {@link - * Task} instances. - */ - private long standardHandleCount = 0L; - - /** - * The number of follow-up {@link ScheduledTask} instances handled. Each - * {@link ScheduledTask} may be backed by multiple duplicate actual {@link - * Task} instances. - */ - private long followUpHandleCount = 0L; - - /** - * The number of {@link ScheduledTask} instances handled successfully. Each - * {@link ScheduledTask} may be backed by multiple duplicate actual {@link - * Task} instances. - */ - private long handleSuccessCount = 0L; - - /** - * The number of {@link ScheduledTask} instances handled unsuccessfully. Each - * {@link ScheduledTask} may be backed by multiple duplicate actual {@link - * Task} instances. - */ - private long handleFailureCount = 0L; - - /** - * The greatest task multiplicity encountered for non-follow-up tasks. - */ - private int greatestMultiplicity = 0; - - /** - * The greatest task multiplicity encountered for follow-up tasks. - */ - private int greatestFollowUpMultiplicity = 0; - - /** - * The total number of times an attempt was made to dequeue a message and - * one was ready. - */ - private long dequeueHitCount = 0L; - - /** - * The total number of times an attempt was made to dequeue a message and one - * was not ready to be dequeued. - */ - private long dequeueMissCount = 0L; - - /** - * The greatest number of info messages that are postponed at any one time. - */ - private int greatestPostponedCount = 0; - - /** - * The object used for synchronization when working with statistics. - */ - private final Object statsMonitor = new Object(); - - /** - * The processing {@link Timers}. 
- */ - private final Timers timers = new Timers(); - - /** - * Flag to use to suppress checking if already handling tasks when - * backgrounding task handling. - */ - private static final ThreadLocal SUPPRESS_HANDLING_CHECK = new ThreadLocal<>(); - - /** - * Default constructor. - */ - protected AbstractSchedulingService() { - this.taskHandler = null; - this.lockingService = null; - this.state = UNINITIALIZED; - this.taskTypeIndex = 0; - this.taskTypeOrder = Arrays.asList(TaskType.values()); - } - - /** - * Gets the {@link State} of this instance. - * - * @return The {@link State} of this instance. - */ - public synchronized State getState() { - return this.state; - } - - /** - * Provides a means to set the {@link State} for this instance as a - * synchronized method that will notify all upon changing the state. - * - * @param state The {@link State} for this instance. - */ - protected synchronized void setState(State state) { - Objects.requireNonNull(state, "State cannot be null"); - this.state = state; - this.notifyAll(); - } - - /** - * Checks if this instance is current handling tasks. This is used to - * synchronize destruction. The {@link #doDestroy()} method is not called - * until task handling ceases. - * - * @return true if this instance is still handling tasks, - * otherwise false. - * - */ - protected synchronized boolean isHandlingTasks() { - return this.handlingTasks; - } - - /** - * The {@link Object} to synchronize on when computing and recording - * statistics in a thread-safe manner. - * - * @return The {@link Object} to synchronize on when computing and recording - * statistics in a thread-safe manner. - */ - protected final Object getStatisticsMonitor() { - return this.statsMonitor; - } - - /** - * Gets the concurrency of the scheduler -- this is the number of threads it - * will use to handle tasks. The returned value will be a positive number - * greater than or equal to one (1). 
- * - * @return The concurrency of the scheduler (i.e.: the number of threads it - * will use to handle tasks). - */ - public int getConcurrency() { - return this.concurrency; - } - - /** - * Gets the default concurrency with which to initialize if one is not specified - * in the initialization configuration via the {@link #CONCURRENCY_KEY} - * initialization parameter. By default, this returns {@link - * #DEFAULT_CONCURRENCY}, but it may be overridden to return something more - * sensible for a derived implementation. - * - * @return The default concurrency with which to initialize. - * - * @see #getConcurrency() - * @see #CONCURRENCY_KEY - * @see #DEFAULT_CONCURRENCY - */ - public int getDefaultConcurrency() { - return DEFAULT_CONCURRENCY; - } - - /** - * Gets the number of milliseconds to sleep between checks on the locks - * required for tasks that have been postponed due to contention. This - * timeout is used when there are pending tasks that have been postponed - * due to contention. - * - * @return The number of milliseconds to sleep between checks on the locks - * required for tasks that have been postponed due to contention. - */ - public long getPostponedTimeout() { - return this.postponedTimeout; - } - - /** - * Gets the default postponed timeout with which to initialize if one is not - * specified in the initialization configuration via the {@link - * #POSTPONED_TIMEOUT_KEY} initialization parameter. By default, this returns - * {@link #DEFAULT_POSTPONED_TIMEOUT}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default postponed timeout with which to initialize. - * - * @see #getPostponedTimeout() - * @see #POSTPONED_TIMEOUT_KEY - * @see #DEFAULT_POSTPONED_TIMEOUT - */ - public long getDefaultPostponedTimeout() { - return DEFAULT_POSTPONED_TIMEOUT; - } - - /** - * Gets the number of milliseconds to delay before attempting to execute a - * follow-up task. 
This delay is used to give the opportunity to receive - * duplicate follow-up tasks that can be collapsed. Whenever a duplicate is - * collapsed, the delay timer starts over unless the {@linkplain - * #getFollowUpTimeout() maximum follow-up deferral time} has been reached. - * - * @return The number of milliseconds to delay before attempting to execute a - * follow-up task. - */ - public long getFollowUpDelay() { - return this.followUpDelay; - } - - /** - * Gets the default follow-up delay with which to initialize if one is not - * specified in the initialization configuration via the {@link - * #FOLLOW_UP_DELAY_KEY} initialization parameter. By default, this returns - * {@link #DEFAULT_FOLLOW_UP_DELAY}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default follow-up delay with which to initialize. - * - * @see #getFollowUpDelay() - * @see #FOLLOW_UP_DELAY_KEY - * @see #DEFAULT_FOLLOW_UP_DELAY - */ - public long getDefaultFollowUpDelay() { - return DEFAULT_FOLLOW_UP_DELAY; - } - - /** - * The maximum number of milliseconds to defer a follow-up task. Once a - * follow-up task has been deferred this number of milliseconds it will no - * longer be purposely delayed to wait for additional duplicates to be - * scheduled and collapsed. It may be delayed because of a lack of resources - * to handle it. - * - * @return The maximum number of milliseconds to defer a follow-up task. - */ - public long getFollowUpTimeout() { - return this.followUpTimeout; - } - - /** - * Gets the default follow-up timeout with which to initialize if one is not - * specified in the initialization configuration via the {@link - * #FOLLOW_UP_TIMEOUT_KEY} initialization parameter. By default, this returns - * {@link #DEFAULT_FOLLOW_UP_TIMEOUT}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default follow-up timeout with which to initialize. 
- * - * @see #getFollowUpTimeout() - * @see #FOLLOW_UP_TIMEOUT_KEY - * @see #DEFAULT_FOLLOW_UP_TIMEOUT - */ - public long getDefaultFollowUpTimeout() { - return DEFAULT_FOLLOW_UP_TIMEOUT; - } - - /** - * Gets the number of milliseconds to lease follow-up messages for handling - * before they become available to be obtained again. The default - * implementation returns twice the {@linkplain #getFollowUpTimeout() - * follow-up timeout}. - * - * @return The number of milliseconds to lease follow-up messages for handling - * before they become available to be obtained again. - */ - public long getFollowUpLeaseTime() { - return this.getFollowUpTimeout() * 2; - } - - /** - * The configured maximum number of follow-up tasks to retrieve from - * persistent search on a single retrieval. The retrieved follow-up tasks - * should be handled within the {@linkplain #getFollowUpTimeout() follow-up - * timeout} and so this number should not be so large that the tasks are not - * handled or their retrieval is renewed within the allotted time. - * - * @return The configured maximum number of follow-up tasks to retrieve from - * persistent storage on a single retrieval. - */ - public int getFollowUpFetchCount() { - return this.followUpFetch; - } - - /** - * Gets the default follow-up fetch count with which to initialize if one is - * not specified in the initialization configuration via the {@link - * #FOLLOW_UP_FETCH_KEY} initialization parameter. By default, this returns - * {@link #DEFAULT_FOLLOW_UP_FETCH}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default follow-up fetch count with which to initialize. - * - * @see #getFollowUpFetchCount() - * @see #FOLLOW_UP_FETCH_KEY - * @see #DEFAULT_FOLLOW_UP_FETCH - */ - public int getDefaultFollowUpFetchCount() { - return DEFAULT_FOLLOW_UP_FETCH; - } - - /** - * Gets the number of milliseconds to sleep between checking to see if task - * handling should cease. 
This timeout is used when there are no postponed - * tasks due to contention. - * - * @return The number of milliseconds to sleep between checking to see if - * task handling should cease. This timeout is used when there - * are no postponed tasks due to contention. - */ - public long getStandardTimeout() { - return this.standardTimeout; - } - - /** - * Gets the default standard timeout with which to initialize if one is not - * specified in the initialization configuration via the {@link - * #STANDARD_TIMEOUT_KEY} initialization parameter. By default, this returns - * {@link #DEFAULT_STANDARD_TIMEOUT}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default standard timeout with which to initialize. - * - * @see #getStandardTimeout() - * @see #STANDARD_TIMEOUT_KEY - * @see #DEFAULT_STANDARD_TIMEOUT - */ - public long getDefaultStandardTimeout() { - return DEFAULT_STANDARD_TIMEOUT; - } - - /** - * Gets the {@link TaskHandler} for this instance. - * - * @return The {@link TaskHandler} for this instance. - */ - @Override - public TaskHandler getTaskHandler() { - return this.taskHandler; - } - - /** - * Sets the {@link TaskHandler} for this instance. - * - * @param taskHandler The {@link TaskHandler} for this instance. - */ - protected void setTaskHandler(TaskHandler taskHandler) { - this.taskHandler = taskHandler; - } - - /** - * Gets the {@link LockingService} for this instance. - * - * @return The {@link LockingService} for this instance. - */ - @Override - public LockingService getLockingService() { - return this.lockingService; - } - - /** - * Sets the {@link LockingService} for this instance. - * - * @param lockingService The {@link LockingService} for this instance. 
- */ - protected void setLockingService(LockingService lockingService) { - this.lockingService = lockingService; - } - - /** - * Gets the default {@link LockingService} class name with which to - * initialize the backing {@link LockingService} if one is not specified in - * the initialization configuration via the {@link #LOCKING_SERVICE_CLASS_KEY} - * initialization parameter. By default, this returns the {@link - * #DEFAULT_LOCKING_SERVICE_CLASS_NAME}, but it may be overridden to return - * something more sensible for a derived implementation. - * - * @return The default {@link LockingService} class name with which to - * initialize. - * - * @see #initLockingService(JsonObject) - * @see #getDefaultLockingServiceConfig() - * @see #LOCKING_SERVICE_CLASS_KEY - * @see #LOCKING_SERVICE_CONFIG_KEY - * @see #DEFAULT_LOCKING_SERVICE_CLASS_NAME - */ - public String getDefaultLockingServiceClassName() { - return DEFAULT_LOCKING_SERVICE_CLASS_NAME; - } - - /** - * Gets the default {@link JsonObject} configuration with which to initialize - * the backing {@link LockingService} if one is not specified in the - * initialization configuration via the {@link #LOCKING_SERVICE_CONFIG_KEY} - * initialization parameter. By default, this returns the null, - * but it may be overridden to return something more sensible for a derived - * implementation. - * - * @return The default {@link JsonObject} configuration with which to - * initialize the backing {@link LockingService}. 
- * - * @see #initLockingService(JsonObject) - * @see #getDefaultLockingServiceClassName() - * @see #LOCKING_SERVICE_CLASS_KEY - * @see #LOCKING_SERVICE_CONFIG_KEY - * @see #DEFAULT_LOCKING_SERVICE_CLASS_NAME - */ - public JsonObject getDefaultLockingServiceConfig() { - return null; - } - - /** - * {@inheritDoc} - */ - @Override - public Scheduler createScheduler(boolean followUp) { - if (followUp) { - return new DefaultScheduler(this); - } else { - TaskGroup taskGroup = new TaskGroup(); - return new DefaultScheduler(this, taskGroup); - } - } - - /** - * Creates a {@link Scheduler} for creating follow-up tasks to the specified - * {@link ScheduledTask} unless follow-up tasks are not supported for the - * specified {@link ScheduledTask}. The default implementation will - * always create a {@link DefaultScheduler} that will not have an - * associated {@link TaskGroup}. - * - * @param task The {@link ScheduledTask} for which to create the follow-up - * scheduler. - * @return The follow-up {@link Scheduler} or null if follow-up - * tasks are not allowed for the specified {@link ScheduledTask}. - */ - protected Scheduler createFollowUpScheduler(ScheduledTask task) { - // create a follow-up scheduler - return new DefaultScheduler(this); - } - - /** - * Schedules the tasks in the specified {@link List}. - * - * @param tasks The {@link List} of {@link Task} instances. - * - * @throws ServiceExecutionException If a failure occurs in scheduling the - * tasks. If a failure occurs then it - * should be assumed that the tasks will - * not be handled and the associated - * message should be retried later. 
- */ - protected void scheduleTasks(List tasks) - throws ServiceExecutionException { - synchronized (this) { - State state = this.getState(); - if (state != READY && state != ACTIVE) { - throw new IllegalStateException( - "Cannot schedule tasks if not in the " + READY + " or " + ACTIVE - + " state: " + state); - } - } - - // loop through the tasks - for (Task task : tasks) { - synchronized (this) { - // get the task group - TaskGroup taskGroup = task.getTaskGroup(); - - // check if this is a follow-up task - if (taskGroup == null) { - logDebug("ENQUEUEING FOLLOW-UP TASK: ", task); - - // enqueue the follow-up task for later retrieval - this.enqueueFollowUpTask(task); - - // notify all that a new follow-up task was enqueued - this.notifyAll(); - continue; - } - - // get the task signature - String signature = task.getSignature(); - - // check if the specified task allows collapse - if (task.isAllowingCollapse()) { - // check for existing tasks by the same signature - ScheduledTask scheduledTask = this.taskCollapseLookup.get(signature); - if (scheduledTask != null) { - logDebug("SCHEDULING TASK: ", task, - "COLLAPSING WITH: ", scheduledTask); - - // simply collapse with the existing scheduled task - scheduledTask.collapseWith(task); - - } else { - // create a scheduled task and add to the pending queue - scheduledTask = new ScheduledTask(task); - logDebug("SCHEDULING TASK: ", task); - this.pendingTasks.add(scheduledTask); - this.taskCollapseLookup.put(signature, scheduledTask); - } - - } else { - // the specified task cannot be collapsed with another - logDebug("SCHEDULING NON-COLLAPSING TASK: ", task); - ScheduledTask scheduledTask = new ScheduledTask(task); - this.pendingTasks.add(scheduledTask); - } - - // for good measure notify all that a new task was scheduled - this.notifyAll(); - } - } - } - - /** - * Dequeues a previously enqueued {@link ScheduledTask}. - * - * @return The {@link ScheduledTask} that was dequeued. 
- */ - protected synchronized ScheduledTask dequeueTask() { - this.timerPause(dequeueBlocking); - this.timerStart(dequeueTaskWaitLoop); - - // set the hit flag to true - boolean hit = true; - - int prevPendingCount = -1; - int prevPostponedCount = -1; - - // wait for a task to be available - while (this.getState().isAvailable() - && (this.pendingTasks.size() == 0) - && (!this.isFollowUpReadyCheckTime()) - && (!this.isPostponedReadyCheckTime())) { - // if we get here then no task was ready so we have a miss - hit = false; - - // toggle the timers - this.toggleActiveAndWaitingTimers(this.pendingTasks.size(), - this.postponedTasks.size(), - this.workerPool.isBusy()); - - // determine if postponed tasks exist - boolean postponed = (this.getPostponedTaskCount() > 0); - - // determine how long to wait - long timeout = (postponed) - ? Math.min(this.getPostponedTimeout(), this.getStandardTimeout()) - : this.getStandardTimeout(); - - // wait for the designated duration - this.timerStart(dequeueTaskWait); - try { - logDebug("SLEEPING BEFORE RETRIEVING " - + (postponed ? 
"POSTPONED" : "FOLLOW-UP") - + " TASK: " + timeout); - this.wait(timeout); - - } catch (InterruptedException ignore) { - // ignore the interruption - } finally { - this.timerPause(dequeueTaskWait); - } - } - this.timerPause(dequeueTaskWaitLoop); - - // grab a postponed task if available - ScheduledTask task = null; - TaskType taskType = null; - int taskTypeCount = this.taskTypeOrder.size(); - for (int index = 0; index < taskTypeCount && task == null; index++) { - taskType = this.taskTypeOrder.get(this.taskTypeIndex++); - this.taskTypeIndex = this.taskTypeIndex % taskTypeCount; - switch (taskType) { - case PENDING: - this.timerStart(checkPending); - try { - task = this.getReadyPendingTask(); - } catch (Exception e) { - logWarning(e, "FAILED TO OBTAIN A TASK FROM THE PENDING QUEUE"); - } finally { - this.timerPause(checkPending); - } - break; - - case POSTPONED: - this.timerStart(checkPostponed); - try { - task = this.getReadyPostponedTask(); - } catch (Exception e) { - logWarning(e, "FAILED TO OBTAIN A POSTPONED TASK, " - + "DEFERRING POSTPONED TASKS FOR NOW"); - } finally { - this.timerPause(checkPostponed); - } - break; - - case FOLLOW_UP: - this.timerStart(checkFollowUp); - try { - task = this.getReadyFollowUpTask(); - } catch (ServiceExecutionException e) { - logWarning(e, "FAILED TO OBTAIN A FOLLOW-UP TASK, " - + "DEFERRING FOLLOW-UP TASKS FOR NOW"); - } finally { - this.timerPause(checkFollowUp); - } - break; - - default: - throw new IllegalStateException( - "Unrecognized task type: " + taskType); - } - } - - // if not null then return the task - if (task != null) { - // ensure the timers toggled correctly - this.timerPause(waitingOnPostponed, waitingForTasks); - this.timerStart(activelyHandling); - this.updateDequeueHitRatio(hit); - - // update the state - if (this.getState() == READY) { - this.setState(ACTIVE); - } - - // check if we need to remove from the collapse lookup - if (!task.isFollowUp() && task.isAllowingCollapse()) { - ScheduledTask collapse = 
this.taskCollapseLookup.remove(task.getSignature()); - if (task != collapse) { - throw new IllegalStateException( - "Collapse lookup table did not contain the same task as was " - + "dequeued. expected=[ " + task + " ], actual=[ " + collapse - + " ]"); - } - } - - // return the task for handling - return task; - } - - this.toggleActiveAndWaitingTimers(this.pendingTasks.size(), - this.postponedTasks.size(), - this.workerPool.isBusy()); - this.updateDequeueHitRatio(false); - - // update the state - if ((this.getState() == ACTIVE) - && (this.pendingTasks.size() == 0) - && (this.postponedTasks.size() == 0) - && (!this.workerPool.isBusy())) { - // no pending or postponed tasks, no tasks being handled and we have none - // to return the user (e.g.: follow-up tasks), go from ACTIVE to READY - this.setState(READY); - - } else if (this.getState() == READY) { - // we are either busy handling tasks or we have pending or postponed tasks - // and we are in the READY state so transition to ACTIVE - this.setState(ACTIVE); - } - - return null; - } - - /** - * Returns a {@link ScheduledTask} from the pending queue that is ready for - * handling. This method will find the least-recently-scheduled task whose set - * of affected resources (identified by {@link ResourceKey} instances) could - * be locked without blocking and locks those resources. If no such pending - * task could be found then null is returned. - * - * @return The next pending {@link ScheduledTask} that is now ready to try, or - * null if none are ready to try. 
- */ - protected synchronized ScheduledTask getReadyPendingTask() { - this.timerStart(dequeueCheckLocked); - try { - // if none ready then check if we can grab a pending task - while (this.pendingTasks.size() > 0) { - // get the candidate task - ScheduledTask task = this.pendingTasks.remove(0); - - // check if the task is aborted - if (this.skipIfAborted(task)) { - continue; - } - - // attempt to lock the task resources - this.timerStart(obtainLocks); - boolean locked = task.acquireLocks(this.getLockingService()); - this.timerPause(obtainLocks); - - // if the lock was obtained, return the task - if (locked) { - return task; - } - - // if not locked then postpone the task - this.postponedTasks.add(task); - - // check the postponed count to see if this is now the greatest - synchronized (this.getStatisticsMonitor()) { - int postponedCount = this.postponedTasks.size(); - if (postponedCount > this.greatestPostponedCount) { - this.greatestPostponedCount = postponedCount; - } - } - // notify all - this.notifyAll(); - } - - // if we get here then return null - return null; - - } finally { - this.timerPause(dequeueCheckLocked); - } - } - - /** - * Returns a previously postponed {@link ScheduledTask} that is now ready to - * be processed. If the last time this method was called was less than the - * {@linkplain #getPostponedTimeout() postpone timeout} then this method returns - * null so that the previously postponed tasks are not checked - * for readiness too frequently. Otherwise, this method will find the least - * recently postponed {@link ScheduledTask} whose set of affected resources - * (identified by {@link ResourceKey} instances) are not currently locked. - * If there are no postponed {@link ScheduledTask} instance that meet the - * readiness criteria, then null is returned. - * - * @return The next postponed {@link ScheduledTask} that is now ready to try. 
- */ - protected synchronized ScheduledTask getReadyPostponedTask() { - // get the elapsed time and update the timestamp - long now = System.nanoTime(); - long elapsedNanos = now - this.postponedNanoTime; - long elapsedMillis = elapsedNanos / ONE_MILLION; - - // check the timestamp - if (elapsedMillis < this.getPostponedTimeout()) { - return null; - } - - // check if there are no postponed messages - if (this.postponedTasks.size() == 0) { - // since we have checked all the postponed messages (none) and none are - // ready then we need to update the timestamp - this.postponedNanoTime = now; - - return null; - } - - // iterate through the postponed messages - Iterator iter = this.postponedTasks.iterator(); - try { - while (iter.hasNext()) { - ScheduledTask task = iter.next(); - - // handle aborted tasks - if (this.skipIfAborted(task)) { - iter.remove(); - continue; - } - - // attempt to lock the task resources - this.timerStart(obtainLocks); - boolean locked = task.acquireLocks(this.getLockingService()); - this.timerPause(obtainLocks); - - if (locked) { - iter.remove(); - return task; - } - } - - } finally { - // check if we checked all the messages - if (!iter.hasNext()) { - // since we have checked all the postponed messages for readiness we - // can update the timestamp so we don't busy check again and again - this.postponedNanoTime = now; - } - } - - // if we get here without returning a message then return null - return null; - } - - /** - * Removes any aborted backing tasks from the specified {@link ScheduledTask}, - * tracks the aborted count and returns true if the specified - * {@link ScheduledTask} can be fully removed from the queue and ignored - * (i.e.: it has no more backing tasks). If not all backing tasks are - * aborted, then false is returned to indicate the task still - * needs to be handled. - * - * @param task The {@link ScheduledTask} to check if fully aborted and remove - * aborted tasks from. 
- * @return true if the specified {@link ScheduledTask} should be - * skipped because it is fully aborted, otherwise false. - */ - protected boolean skipIfAborted(ScheduledTask task) { - // remove any aborted tasks - int abortCount = task.removeAborted(); - this.taskAbortCount += abortCount; - - // check if aborted - if (task.getMultiplicity() == 0) { - if (task.isAllowingCollapse()) { - ScheduledTask collapse = this.taskCollapseLookup.get(task.getSignature()); - if (collapse == task) { - this.taskCollapseLookup.remove(task.getSignature()); - } else { - throw new IllegalStateException( - "Unexpected collapsing task in lookup. expected=[ " - + task + " ], found=[ " + collapse + " ]"); - } - } - return true; - } - - // return false if we get here - return false; - } - - /** - * Checks if a check should be performed against the readiness of the - * postponed tasks. This returns true if and only if there is - * at least one postponed task and the readiness check has not been - * performed within the configured postponed timeout. - * - * @return true if it is time to perform a postponed task - * readiness check, otherwise false. - */ - protected synchronized boolean isPostponedReadyCheckTime() { - // no need to do a ready check if no postponed messages - if (this.postponedTasks.size() == 0) { - return false; - } - - // get the elapsed time and update the timestamp - long now = System.nanoTime(); - long elapsedNanos = now - this.postponedNanoTime; - long elapsedMillis = elapsedNanos / ONE_MILLION; - - // check the timestamp - return (elapsedMillis >= this.getPostponedTimeout()); - } - - /** - * Returns a previously scheduled follow-up {@link ScheduledTask} that is now - * ready to be processed. If the resources that must be locked are not - * available for the follow-up task then it is left on the queue. 
If the - * last time this method was called was less than the - * {@linkplain #getPostponedTimeout() postpone timeout} then this method returns - * null so that the previously postponed tasks are not checked - * for readiness too frequently. Otherwise, this method will find the least - * recently postponed {@link ScheduledTask} whose set of affected resources - * (identified by {@link ResourceKey} instances) are not currently locked. - * If there are no postponed {@link ScheduledTask} instance that meet the - * readiness criteria, then null is returned. - * - * @return The next postponed {@link ScheduledTask} that is now ready to try. - * @throws ServiceExecutionException If a failure occurs in obtaining a - * follow-up task. - */ - protected synchronized ScheduledTask getReadyFollowUpTask() - throws ServiceExecutionException { - // get the current timestamp - long now = System.nanoTime(); - - // check if there are no follow-up messages - if (this.followUpTasks.size() <= 1) { - // we have no follow-up tasks in the cache, let's get some - List tasks = this.dequeueFollowUpTasks( - this.getFollowUpFetchCount()); - - // add the follow-up tasks - this.followUpTasks.addAll(tasks); - this.followUpRenewNanos = now + ((this.getFollowUpLeaseTime() / 2) * ONE_MILLION); - - // check if we still have no follow-up tasks - if (this.followUpTasks.size() == 0) { - // since we have checked all the follow-up messages (none) and none are - // ready then we need to update the timestamp - this.followUpNanoTime = now; - logDebug("RESET FOLLOW-UP CHECK TIME"); - - // return null since there are no follow-up tasks - return null; - } - } else if (now > this.followUpRenewNanos) { - int size = this.followUpTasks.size() - + this.inProgressFollowUpTasks.size(); - List renewList = new ArrayList<>(size); - renewList.addAll(this.inProgressFollowUpTasks.keySet()); - renewList.addAll(this.followUpTasks); - - // renew the leases on the follow-up tasks - this.renewFollowUpTasks(renewList); - } - - // 
iterate through the follow-up messages - Iterator iter = this.followUpTasks.iterator(); - try { - while (iter.hasNext()) { - // get the next follow-up task - ScheduledTask task = iter.next(); - - // attempt to lock the message resources - this.timerStart(obtainLocks); - boolean locked = task.acquireLocks(this.getLockingService()); - this.timerPause(obtainLocks); - - if (locked) { - iter.remove(); - this.inProgressFollowUpTasks.put(task, System.nanoTime()); - return task; - } - } - - } finally { - // check if we checked all the messages - if (!iter.hasNext()) { - // since we have checked all the follow-up messages for readiness we - // can update the timestamp so we don't busy check again and again - this.followUpNanoTime = now; - logDebug("RESET FOLLOW-UP CHECK TIME"); - } - } - - // if we get here without returning a message then return null - return null; - } - - /** - * Checks if a check should be performed against the readiness of the - * follow-up tasks. This returns true if and only if there is - * at least one follow-up task and the readiness check has not been - * performed within the configured follow-up timeout. - * - * @return true if it is time to perform a postponed task - * readiness check, otherwise false. - */ - protected synchronized boolean isFollowUpReadyCheckTime() { - // get the elapsed time and update the timestamp - long now = System.nanoTime(); - long elapsedNanos = now - this.followUpNanoTime; - long elapsedMillis = elapsedNanos / ONE_MILLION; - - // check the timestamp - return (elapsedMillis >= (this.getFollowUpDelay() / 2)); - } - - /** - * Enqueues the specified follow-up {@link Task} instance and persists it for - * future retrieval. A follow-up {@link Task} does not belong to a - * {@link TaskGroup} and therefore should have a null - * {@linkplain Task#getTaskGroup() task group property}. - * - * @param task The follow-up {@link Task} to enqueue. 
- * - * @throws IllegalArgumentException If any of the specified {@link Task} - * belongs - * to a {@link TaskGroup}. - * - * @throws ServiceExecutionException If a failure occurs in persisting the - * specified {@link Task} instances - * - */ - protected abstract void enqueueFollowUpTask(Task task) - throws ServiceExecutionException; - - /** - * Retrieves a number of follow-up tasks from persistent storage. - * This should mark the retrieved tasks as pending and should not return - * them again until at least after {@link #getFollowUpTimeout()} - * milliseconds has past. - * - * @param count The suggested number of follow-up tasks to retrieve from - * persistent storage. - * - * @return The {@link List} of follow-up {@link Task} instances retrieved - * from persistent storage. - * - * @throws ServiceExecutionException If a failure occurs in persisting the - * specified {@link Task} instances - */ - protected abstract List dequeueFollowUpTasks(int count) - throws ServiceExecutionException; - - /** - * Renews the leases on the specified follow-up tasks from persistent - * storage. This should mark the retrieved tasks as pending and update their - * expiration timestamps accordingly. The specified {@link ScheduledTask} - * instances should be directly modified via {@link - * ScheduledTask#setFollowUpExpiration(long)}. - * - * @param tasks The {@link ScheduledTask} instances for lease renewal. - * - * @throws ServiceExecutionException If a failure occurs in persisting the - * specified {@link Task} instances - */ - protected abstract void renewFollowUpTasks(List tasks) - throws ServiceExecutionException; - - /** - * Marks the specified follow-up task as complete and removes it from - * persistent storage and is no longer available for dequeue. - * - * @param task The {@link ScheduledTask} to be marked as completed. 
- * - * @throws ServiceExecutionException If a failure occurs in persisting the - * specified {@link Task} instances - */ - protected abstract void completeFollowUpTask(ScheduledTask task) - throws ServiceExecutionException; - - /** - * Calls the {@link #handleTasks()} function in a background thread after - * validating the current state of this instance. - */ - protected void backgroundHandleTasks() { - synchronized (this) { - // check if not "READY" - if (this.getState() != READY && this.getState() != ACTIVE) { - throw new IllegalStateException( - "Cannot call backgroundHandleTasks() if not in the " + READY - + " or " + ACTIVE + " state. Current state is " - + this.getState()); - } - - // check if already handling tasks - if (this.handlingTasks) { - throw new IllegalStateException( - "Cannot call handleTasks() when it has already been called and is " - + "still handling tasks."); - } - - // set the handling tasks flag - this.handlingTasks = true; - - // verify the handling thread is null - if (this.taskHandlingThread != null) { - throw new IllegalStateException( - "Task handling thread seems to already exist."); - } - - // create the thread - this.taskHandlingThread = new Thread(() -> { - TaskHandler taskHandler = this.getTaskHandler(); - Boolean ready = null; - int count = 0; - - try { - do { - if (count > 0) { - logInfo("****** STILL WAITING ON TASK HANDLER READINESS"); - } - count++; - ready = taskHandler.waitUntilReady(READY_TIMEOUT); - } while (FALSE.equals(ready)); - - } catch (InterruptedException e) { - logWarning("****** INTERRUPTED WHILE WAITING ON TASK HANDLER " - + "READINESS"); - e.printStackTrace(); - return; - } - - // check if ready state indicates a failure - if (ready == null) { - logWarning( - "****** TASK HANDLER HAS INDICATED A FAILURE PREVENTING " - + "READINESS (CHECK LOGS)"); - return; - } - - // check if ready state is false (should not get here) - if (FALSE.equals(ready)) { - logWarning( - "****** TASK HANDLER NEVER BECAME READY TO 
HANDLE TASKS"); - return; - } - - SUPPRESS_HANDLING_CHECK.set(true); - this.handleTasks(); - }); - - // start the thread - this.taskHandlingThread.start(); - } - } - - /** - * Provides a loop that continues to schedule and handle tasks as long as - * the {@link State} of this instance obtained from {@link #getState()} is - * indicates the service is {@linkplain State#isAvailable() available} or - * until there are no more pending or postponed tasks. If the state - * transitions such that the service is no longer {@linkplain - * State#isAvailable() available} the only previously scheduled tasks will - * be handled before the processing terminates. This method does not return - * until handling of the tasks is complete. - * - */ - protected void handleTasks() { - try { - if (!SUPPRESS_HANDLING_CHECK.get()) { - synchronized (this) { - if (this.getState().isAvailable()) { - throw new IllegalStateException( - "Cannot call handleTasks() if not in the " + READY + " or " - + ACTIVE + " state. Current state is " + this.getState()); - } - - // check if already handling tasks - if (this.handlingTasks) { - throw new IllegalStateException( - "Cannot call handleTasks() when it has already been called and " - + "tasks are still being handled."); - } - - // set the handling tasks flag - this.handlingTasks = true; - } - } - - // create the worker pool - synchronized (this) { - this.workerPool = new AsyncWorkerPool<>(this.getConcurrency()); - } - - // start the handling timer - this.timerStart(taskHandling, betweenTasks); - - // loop over the tasks - while (this.getState().isAvailable() - || this.getPendingTaskCount() > 0 - || this.getPostponedTaskCount() > 0 - || this.getLeasedFollowUpTaskCount() > 0) { - // dequeue a message - this.timerStart(dequeue, dequeueBlocking); - ScheduledTask task = this.dequeueTask(); - this.timerPause(dequeue); - - // check if we have a task - if (task != null) { - this.timerPause(betweenTasks); - this.timerStart(activelyHandling); - - // prep a task 
reference for the - final ScheduledTask currentTask = task; - final Timers timers = new Timers(); - timers.start(waitForWorker.toString()); - AsyncResult result = this.workerPool.execute(() -> { - try { - // handle the task - timers.start(handleTask.toString()); - currentTask.beginHandling(); - taskHandler.handleTask(currentTask.getAction(), - currentTask.getParameters(), - currentTask.getMultiplicity(), - this.createFollowUpScheduler(currentTask)); - timers.pause(handleTask.toString()); - - // in case of success mark it as handled - timers.start(markComplete.toString()); - currentTask.succeeded(); - timers.pause(markComplete.toString()); - - } catch (Exception e) { - // in case of exception mark it as failed - timers.start(markComplete.toString()); - currentTask.failed(e); - timers.pause(markComplete.toString()); - - } finally { - // remove from persistent store (mark completed) - if (currentTask.isFollowUp()) { - timers.start(completeFollowUp.toString()); - synchronized (this) { - this.inProgressFollowUpTasks.remove(currentTask); - } - this.completeFollowUpTask(currentTask); - timers.pause(completeFollowUp.toString()); - } - - // release any associated locks on the resources - timers.start(releaseLocks.toString()); - currentTask.releaseLocks(this.getLockingService()); - timers.pause(releaseLocks.toString()); - - // record statistics - this.recordStatistics(task, timers); - } + * The number of follow-up tasks that have been handled whether successful or + * failed. This excluded aborted tasks. + */ + private long followUpCompleteCount = 0L; - return new TaskResult(currentTask, timers); - }); + /** + * The number of times the + * {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} + * method has been successfully called. 
+ */ + private long followUpSuccessCount = 0L; - this.handleAsyncResult(result); - } - this.timerStart(betweenTasks); - } + /** + * The number of times the + * {@link com.senzing.listener.service.MessageProcessor#process(JsonObject)} + * method has been called and thrown an exception. + */ + private long followUpFailureCount = 0L; - // when done, close out the worker pool - try { - // if we get here then all postponed tasks have been handled and we - // are no longer scheduling tasks -- time to wait for completion of - // in-flight tasks so they can be disposed - List> results = this.workerPool.close(); - for (AsyncResult result : results) { - this.handleAsyncResult(result); - } - } finally { - this.timerPause(taskHandling, - activelyHandling, - waitingForTasks, - waitingOnPostponed); + /** + * The number of task groups that have successfully completed. + */ + private long groupSuccessCount = 0L; - synchronized (this) { - this.handlingTasks = false; - this.workerPool = null; - this.notifyAll(); - } - } - - } catch (Exception e) { - e.printStackTrace(); - } - } - - /** - * Handles the {@link AsyncResult} from the {@link AsyncWorkerPool} after it - * is received. This extracts the {@link TaskResult} value and traps any - * exceptions (there should be none). It records the timings from the - * handling and calls {@link #postProcess(ScheduledTask)}. - * - * @param result The {@link AsyncResult} to handle, or null if - * no result was returned. - */ - protected void handleAsyncResult(AsyncResult result) { - if (result == null) { - return; - } - TaskResult taskResult = null; - - try { - taskResult = result.getValue(); - - } catch (Exception cannotHappen) { - // exceptions should be logged and consumed during processing and used - // to determine the disposability of the message/batch. 
- logError(cannotHappen, "UNEXPECTED EXCEPTION"); - throw new IllegalStateException(cannotHappen); - } - - ScheduledTask task = taskResult.getTask(); - this.timerStart(postProcess); - try { - this.postProcess(task); - } finally { - this.timerPause(postProcess); - } - } - - /** - * This method does nothing, but provides a hook so that it may be overridden - * to do any special handling on the {@link ScheduledTask} after it has been - * handled by the {@link TaskHandler}. - * - * @param task The {@link ScheduledTask} that was handled. - */ - protected void postProcess(ScheduledTask task) { - // do nothing - } - - /** - * Records the statistics pertaining to the specified {@link ScheduledTask} - * and using the specified {@link Timers} instance. - * - * @param scheduledTask The {@link ScheduledTask} that was completed. - * @param timers The {@link Timers} associated with the specified {@link - * ScheduledTask}. - */ - protected void recordStatistics(ScheduledTask scheduledTask, Timers timers) { - if (scheduledTask.isSuccessful() == null) { - logWarning("Statistics recorded for incomplete task: ", - scheduledTask); - return; - } - synchronized (this.getStatisticsMonitor()) { - // increment the scheduled task count - this.handleCount++; - if (scheduledTask.isSuccessful()) { - this.handleSuccessCount++; - } else { - this.handleFailureCount++; - } - - // check if this task is a follow-up - boolean followUp = scheduledTask.isFollowUp(); - if (followUp) { - this.followUpHandleCount++; - } else { - this.standardHandleCount++; - } - - int multiplicity = scheduledTask.getMultiplicity(); - if (followUp) { - // update the follow-up multiplicity stats - if (multiplicity > this.greatestFollowUpMultiplicity) { - this.greatestFollowUpMultiplicity = multiplicity; - } - } else { - // update the greatest multiplicity - if (multiplicity > this.greatestMultiplicity) { - this.greatestMultiplicity = multiplicity; - } - } - - // get the handling time - String timerKey = 
handleTask.toString(); - long handlingMillis = timers.getElapsedTime(timerKey); - this.totalHandlingTime += handlingMillis; - if (this.longestHandlingTime < handlingMillis) { - this.longestHandlingTime = handlingMillis; - } - - // iterate over the backing tasks - scheduledTask.getBackingTasks().forEach(task -> { - TaskGroup taskGroup = task.getTaskGroup(); - - // if we have a task group then handle group statistics - if (taskGroup != null) { - boolean concluding = taskGroup.isConcludingTask(task); - if (concluding) { - this.taskGroupCount++; - int taskCount = taskGroup.getTaskCount(); - if (this.greatestGroupSize < taskCount) { - this.greatestGroupSize = taskCount; - } - long roundTrip = taskGroup.getRoundTripTime(); - this.totalTaskGroupTime += roundTrip; - if (roundTrip > this.longestTaskGroupTime) { - this.longestTaskGroupTime = roundTrip; - } - TaskGroup.State state = taskGroup.getState(); - if (state == TaskGroup.State.SUCCESSFUL) { - this.groupSuccessCount++; - } else if (state == TaskGroup.State.FAILED) { - this.groupFailureCount++; - } - } - } - - // handle task statistics - if (followUp) { - // since follow-up tasks are collapsed into a single backing task - // then we need to add the multiplicity instead - this.followUpCompleteCount += multiplicity; - switch (task.getState()) { - case SUCCESSFUL: - this.followUpSuccessCount += multiplicity; - break; - case FAILED: - this.followUpFailureCount += multiplicity; - break; - default: - logWarning("UNEXPECTED POST-COMPLETION TASK STATE: " - + task.getState(), task); - } - } else { - this.taskCompleteCount++; - switch (task.getState()) { - case SUCCESSFUL: - this.taskSuccessCount++; - break; - case FAILED: - this.taskFailureCount++; - break; - default: - logWarning("UNEXPECTED POST-COMPLETION TASK STATE: " - + task.getState(), task); - } - - long taskTime = task.getRoundTripTime(); - if (this.longestTaskTime < taskTime) { - this.longestTaskTime = taskTime; - } - this.totalTaskTime += taskTime; - } - }); - } - } - 
- /** - * Gets the number of queued tasks that are pending. - * - * @return The number of pending tasks. - */ - protected synchronized int getPendingTaskCount() { - return this.pendingTasks.size(); - } - - /** - * Gets the number of postponed tasks. - * - * @return The number of postponed tasks. - */ - protected synchronized int getPostponedTaskCount() { - return this.postponedTasks.size(); - } - - /** - * Gets the number of follow-up tasks cached in memory. - * - * @return The number of follow-up tasks cached in memory. - */ - protected synchronized int getLeasedFollowUpTaskCount() { - return this.followUpTasks.size(); - } - - /** - * Default implementation of {@link - * SchedulingService#init(JsonObject,TaskHandler)} that will initialize the - * base properties and then call {@link #doInit(JsonObject)} to complete the - * configuration. This implementation will ensure that this function is - * called in the {@link State#UNINITIALIZED} and that the service transitions - * to the {@link State#READY} state at its conclusion. - * - * @param config The {@link JsonObject} describing the configuration. - * @param taskHandler The {@link TaskHandler} to use for handling tasks. - * @throws ServiceSetupException If a failure occurs. 
- */ - @Override - public void init(JsonObject config, TaskHandler taskHandler) - throws ServiceSetupException { - Objects.requireNonNull(taskHandler, - "The specified TaskHandler cannot be null"); - synchronized (this) { - if (this.getState() != UNINITIALIZED) { - throw new IllegalStateException( - "Cannot initialize if not in the " + UNINITIALIZED + " state: " - + this.getState()); - } - this.timerStart(initialize); - this.setState(INITIALIZING); - } - - try { - synchronized (this) { - // default to an empty JSON object if null - if (config == null) { - config = Json.createObjectBuilder().build(); - } - - this.lockingService = this.initLockingService(config); - this.setTaskHandler(taskHandler); - - this.concurrency = getConfigInteger(config, - CONCURRENCY_KEY, - 1, - this.getDefaultConcurrency()); - - // get the postponed timeout - this.postponedTimeout = getConfigLong(config, - POSTPONED_TIMEOUT_KEY, - 0L, - this.getDefaultPostponedTimeout()); - - // get the standard timeout - this.standardTimeout = getConfigLong(config, - STANDARD_TIMEOUT_KEY, - 0L, - this.getDefaultStandardTimeout()); - - // get the follow-up delay - this.followUpDelay = getConfigLong(config, - FOLLOW_UP_DELAY_KEY, - 0L, - this.getDefaultFollowUpDelay()); - - // get the follow-up timeout - this.followUpTimeout = getConfigLong(config, - FOLLOW_UP_TIMEOUT_KEY, - 0L, - this.getDefaultFollowUpTimeout()); - - // get the follow-up fetch - this.followUpFetch = getConfigInteger( - config, - FOLLOW_UP_FETCH_KEY, - 1, - this.getDefaultFollowUpFetchCount()); - - // check that the follow-up timeout is greater than follow-up delay - if (this.followUpTimeout < this.followUpDelay) { - throw new ServiceSetupException( - "The configured value for " + FOLLOW_UP_TIMEOUT_KEY + " (" - + this.followUpTimeout + ") cannot be less than the " - + "configured value for " + FOLLOW_UP_DELAY_KEY + " (" - + this.followUpDelay + ")."); - } - - // create the queues - this.pendingTasks = new LinkedList<>(); - 
this.postponedTasks = new LinkedList<>(); - this.followUpTasks = new LinkedList<>(); - this.inProgressFollowUpTasks = new IdentityHashMap<>(); - this.taskCollapseLookup = new LinkedHashMap<>(); - } - - // defer additional configuration - this.doInit(config); - - // set to the ready state - this.setState(READY); - this.backgroundHandleTasks(); - - } catch (Exception e) { - e.printStackTrace(); - this.setState(UNINITIALIZED); - throw new RuntimeException(e); - - } finally { - this.timerPause(initialize); - } - } - - /** - * The default implementation of this method gets the class name from - * the {@link #LOCKING_SERVICE_CLASS_KEY} parameter, constructs an instance - * of that class using the default constructor and then initializes the - * constructed {@link LockingService} instance using the {@link JsonObject} - * found in the specified configuration via the {@link - * #LOCKING_SERVICE_CONFIG_KEY} JSON property. - * - * @param jsonConfig The {@link JsonObject} describing the configuration - * for this instance of scheduling service. - * - * @return The {@link LockingService} that was created and initialized. - * @throws ServiceSetupException If a failure occurs in initializing the - * backing {@link LockingService}. 
- */ - @SuppressWarnings("unchecked") - protected LockingService initLockingService(JsonObject jsonConfig) - throws ServiceSetupException { - try { - // get the LockingService class name from the config - String className = getConfigString( - jsonConfig, - LOCKING_SERVICE_CLASS_KEY, - this.getDefaultLockingServiceClassName()); - - // get the LockingService Class object from the class name - Class lockServiceClass = Class.forName(className); - - if (!LockingService.class.isAssignableFrom(lockServiceClass)) { - throw new ServiceSetupException( - "The configured locking service class for the " - + LOCKING_SERVICE_CLASS_KEY + " config parameter must " - + "implement " + LockingService.class.getName()); - } - - // create an instance of the LockingService class - LockingService lockService = (LockingService) lockServiceClass.getConstructor().newInstance(); - - // get the locking service configuration - JsonObject lockServiceConfig = (jsonConfig.containsKey(LOCKING_SERVICE_CONFIG_KEY)) - ? getJsonObject(jsonConfig, LOCKING_SERVICE_CONFIG_KEY) - : this.getDefaultLockingServiceConfig(); - - // initialize the locking service - lockService.init(lockServiceConfig); - - // return the locking service - return lockService; - - } catch (ServiceSetupException e) { - throw e; - } catch (Exception e) { - throw new ServiceSetupException( - "Failed to initialize LockingService for SchedulingService", e); - } - } - - /** - * Called by the {@link #init(JsonObject,TaskHandler)} implementation after - * handling the - * base configuration parameters. - * - * @param config The {@link JsonObject} describing the configuration. - * - * @throws ServiceSetupException If a failure occurs during initialization. 
- */ - protected abstract void doInit(JsonObject config) - throws ServiceSetupException; - - /** - * Implemented as a synchronized method to {@linkplain #setState(State) - * set the state} to {@link State#DESTROYING}, call {@link #doDestroy()} and - * then perform {@link #notifyAll()} and set the state to {@link - * State#DESTROYED}. - */ - public void destroy() { - synchronized (this) { - State state = this.getState(); - if (state == DESTROYED) { - return; - } - - if (state == DESTROYING) { - while (this.getState() != DESTROYED) { - try { - this.wait(this.getStandardTimeout()); - } catch (InterruptedException e) { - // ignore - } - } - // once DESTROYED state is found, just return - return; - } - - // begin destruction - this.setState(DESTROYING); - this.timerStart(destroy); - - // wait until no longer handling tasks - while (this.isHandlingTasks()) { - try { - this.wait(this.getStandardTimeout()); + /** + * The greatest number of tasks encountered for a task group. + */ + private int greatestGroupSize = 0; - } catch (InterruptedException ignore) { - // do nothing - } - } - } - - // join against the scheduler thread - try { - this.taskHandlingThread.join(); - - } catch (InterruptedException ignore) { - // ignore the exception - } - - try { - // now complete the destruction / cleanup - this.doDestroy(); - - // destroy the locking service - this.lockingService.destroy(); - - } finally { - this.setState(DESTROYED); // this should notify all as well - this.timerPause(destroy); - } - } - - /** - * This is called from the {@link #destroy()} implementation and should be - * overridden by the concrete sub-class. - */ - protected abstract void doDestroy(); - - /** - * Converts the specified {@link Stat} instances to an array - * of {@link String} instances. - * - * @param statistics The {@link Stat} instances to convert. - * - * @return The array of {@link String} instances describing the specified - * {@link Stat} instances. 
- */ - private String[] convertTimerKeys(Stat... statistics) { - String[] names = (statistics == null || statistics.length == 0) - ? null - : new String[statistics.length]; - if (names != null) { - for (int index = 0; index < statistics.length; index++) { - names[index] = statistics[index].toString(); - } - } - return names; - } - - /** - * Merges the specified {@link Timers} with this instances {@link Timers} - * in a thread safe manner. - * - * @param timers The {@link Timers} to merge. - */ - protected void timerMerge(Timers timers) { - synchronized (this.getStatisticsMonitor()) { - this.timers.mergeWith(timers); - } - } - - /** - * Toggles the active and waiting timers. - * - * @param pendingCount The number of pending messages. - * @param postponedCount The number of postponed messages. - * @param busy true if the worker pool is busy, otherwise - * false. - */ - protected void toggleActiveAndWaitingTimers(int pendingCount, - int postponedCount, - boolean busy) { - synchronized (this.getStatisticsMonitor()) { - // check if there are messages - if (busy) { - this.timerPause(waitingForTasks, waitingOnPostponed); - this.timerStart(activelyHandling); - - } else if (pendingCount == 0 && postponedCount == 0) { - // no tasks pending or postponed - this.timerPause(activelyHandling, waitingOnPostponed); - this.timerStart(waitingForTasks); - - } else if (pendingCount > 0) { - // messages pending - this.timerPause(waitingForTasks, waitingOnPostponed); - this.timerStart(activelyHandling); - - } else if (postponedCount > 0) { - // none pending, but some postponed - this.timerPause(activelyHandling, waitingForTasks); - this.timerStart(waitingOnPostponed); - } - } - } - - /** - * Resumes the associated {@link Timers} in a thread-safe manner. - * - * @param statistic The {@link Stat} to resume. - * @param addlTimers The additional {@link Stat} instances to - * resume. - */ - protected void timerResume(Stat statistic, - Stat... 
addlTimers) { - String[] names = this.convertTimerKeys(addlTimers); - synchronized (this.getStatisticsMonitor()) { - if (names == null) { - this.timers.resume(statistic.toString()); - } else { - this.timers.resume(statistic.toString(), names); - } - } - } - - /** - * Starts the associated {@link Timers} in a thread-safe manner. - * - * @param statistic The {@link com.senzing.listener.communication.AbstractMessageConsumer.Stat} to start. - * @param addlTimers The additional {@link Stat} instances to - * start. - */ - protected void timerStart(Stat statistic, - Stat... addlTimers) { - String[] names = this.convertTimerKeys(addlTimers); - synchronized (this.getStatisticsMonitor()) { - if (names == null) { - this.timers.start(statistic.toString()); - } else { - this.timers.start(statistic.toString(), names); - } - } - } - - /** - * Pauses the associated {@link Timers} in a thread-safe manner. - * - * @param statistic The {@link Stat} to pause. - * @param addlTimers The additional {@link Stat} instances to - * pause. - */ - protected void timerPause(Stat statistic, - Stat... addlTimers) { - String[] names = this.convertTimerKeys(addlTimers); - synchronized (this.getStatisticsMonitor()) { - if (names == null) { - this.timers.pause(statistic.toString()); - } else { - this.timers.pause(statistic.toString(), names); - } - } - } - - /** - * Gets the {@link Map} of {@link com.senzing.listener.communication.AbstractMessageConsumer.Stat} keys to - * their {@link Number} values in an atomic thread-safe manner. - * - * @return The {@link Map} of {@link com.senzing.listener.communication.AbstractMessageConsumer.Stat} keys - * to their {@link Number} values. 
- */ - @Override - public Map getStatistics() { - synchronized (this.getStatisticsMonitor()) { - Number value = null; - Map timings = this.timers.getTimings(); - - Map statsMap = new LinkedHashMap<>(); - - statsMap.put(AbstractSchedulingService.Stat.concurrency, this.getConcurrency()); - statsMap.put(AbstractSchedulingService.Stat.standardTimeout, this.getStandardTimeout()); - statsMap.put(AbstractSchedulingService.Stat.postponedTimeout, this.getPostponedTimeout()); - statsMap.put(AbstractSchedulingService.Stat.followUpDelay, this.getFollowUpDelay()); - statsMap.put(AbstractSchedulingService.Stat.followUpTimeout, this.getFollowUpTimeout()); - - value = this.getAverageTaskTime(); - if (value != null) { - statsMap.put(averageTaskTime, value); - } - - value = this.getAverageTaskGroupTime(); - if (value != null) { - statsMap.put(averageTaskGroupTime, value); - } - - value = this.getLongestTaskTime(); - if (value != null) { - statsMap.put(AbstractSchedulingService.Stat.longestTaskTime, value); - } - - value = this.getLongestTaskGroupTime(); - if (value != null) { - statsMap.put(AbstractSchedulingService.Stat.longestTaskGroupTime, value); - } - - statsMap.put(AbstractSchedulingService.Stat.taskCompleteCount, this.getCompletedTaskCount()); - statsMap.put(AbstractSchedulingService.Stat.taskSuccessCount, this.getSuccessfulTaskCount()); - statsMap.put(AbstractSchedulingService.Stat.taskFailureCount, this.getFailedTaskCount()); - statsMap.put(AbstractSchedulingService.Stat.taskAbortCount, this.getAbortedTaskCount()); - statsMap.put(AbstractSchedulingService.Stat.followUpCompleteCount, - this.getCompletedFollowUpCount()); - statsMap.put(AbstractSchedulingService.Stat.followUpSuccessCount, - this.getSuccessfulFollowUpCount()); - statsMap.put(AbstractSchedulingService.Stat.followUpFailureCount, - this.getFailedFollowUpCount()); - - value = this.getAverageHandleTaskTime(); - if (value != null) { - statsMap.put(averageHandleTask, value); - } - - statsMap.put(handleTaskCount, 
this.getHandleTaskCount()); - statsMap.put(handleTaskSuccessCount, this.getSuccessfulHandleTaskCount()); - statsMap.put(handleTaskFailureCount, this.getFailedHandleTaskCount()); - - value = this.getFollowUpHandleTaskRatio(); - if (value != null) { - statsMap.put(followUpHandleTaskRatio, value); - } - - statsMap.put(taskGroupCompleteCount, this.getCompletedTaskGroupCount()); - statsMap.put(taskGroupSuccessCount, this.getSuccessfulTaskGroupCount()); - statsMap.put(taskGroupFailureCount, this.getFailedTaskGroupCount()); - - value = this.getAverageCompressionRatio(); - if (value != null) { - statsMap.put(averageCompression, value); - } - - value = this.getGreatestCompressionRatio(); - if (value != null) { - statsMap.put(greatestCompression, value); - } - - value = this.getAverageFollowUpCompressionRatio(); - if (value != null) { - statsMap.put(averageFollowUpCompression, value); - } - - value = this.getGreatestFollowUpCompressionRatio(); - if (value != null) { - statsMap.put(greatestFollowUpCompression, value); - } - - value = this.getAverageTaskGroupSize(); - if (value != null) { - statsMap.put(averageTaskGroupSize, value); - } - - value = this.getGreatestTaskGroupSize(); - if (value != null) { - statsMap.put(greatestTaskGroupSize, value); - } - - value = this.getParallelism(); - if (value != null) { - statsMap.put(parallelism, value); - } - - value = this.getDequeueHitRatio(); - if (value != null) { - statsMap.put(dequeueHitRatio, value); - } - - statsMap.put(AbstractSchedulingService.Stat.greatestPostponedCount, - this.getGreatestPostponedCount()); - - // now get the timings - for (Stat statistic : AbstractSchedulingService.Stat.values()) { - value = timings.get(statistic.toString()); - if (value != null) { - statsMap.put(statistic, value); - } - } - - return statsMap; - } - } - - /** - * Gets the average task compression from collapsing non-follow-up tasks - * handled by the scheduling service. This returns null if no - * non-follow-up tasks have been handled. 
- * - * @return The average task compression from collapsing non-follow-up tasks - * handled by the scheduling service, or null if no - * non-follow-up tasks have been handled. - */ - public Double getAverageCompressionRatio() { - synchronized (this.getStatisticsMonitor()) { - if (this.standardHandleCount == 0) { - return null; - } - double completeCount = (double) this.taskCompleteCount; - double handleCount = (double) this.standardHandleCount; - return completeCount / handleCount; - } - } - - /** - * Gets the greatest task compression from collapsing non-follow-up tasks - * handled by the scheduling service. This returns null if - * no tasks have been handled. - * - * @return The greatest task compression from collapsing non-follow-up tasks - * handled by the scheduling service, or null if no - * tasks have been handled. - */ - public Integer getGreatestCompressionRatio() { - synchronized (this.getStatisticsMonitor()) { - if (this.greatestMultiplicity <= 0) { - return null; - } - return this.greatestMultiplicity; - } - } - - /** - * Gets the average task compression from collapsing follow-up tasks - * handled by the scheduling service. This returns null if no - * follow-up tasks have been handled. - * - * @return The average task compression from collapsing follow-up tasks - * handled by the scheduling service, or null if no - * follow-up tasks have been handled. - */ - public Double getAverageFollowUpCompressionRatio() { - synchronized (this.getStatisticsMonitor()) { - if (this.followUpHandleCount == 0) { - return null; - } - double completeCount = (double) this.followUpCompleteCount; - double handleCount = (double) this.followUpHandleCount; - return completeCount / handleCount; - } - } - - /** - * Gets the greatest task compression from collapsing follow-up tasks - * handled by the scheduling service. This returns null if - * no follow-up tasks have been handled. 
- * - * @return The greatest task compression from collapsing follow-up tasks - * handled by the scheduling service, or null if no - * follow-up tasks have been handled. - */ - public Integer getGreatestFollowUpCompressionRatio() { - synchronized (this.getStatisticsMonitor()) { - if (this.greatestFollowUpMultiplicity <= 0) { - return null; - } - return this.greatestFollowUpMultiplicity; - } - } - - /** - * Gets the average number of tasks in all the completed task groups. This - * returns null if no task groups have been completed. - * - * @return The average number of tasks in all the completed task groups, or - * null if no task groups have been completed. - */ - public Double getAverageTaskGroupSize() { - synchronized (this.getStatisticsMonitor()) { - if (this.taskGroupCount == 0) { - return null; - } - double completeCount = (double) this.taskCompleteCount; - double groupCount = (double) this.taskGroupCount; - return (completeCount / groupCount); - } - } - - /** - * Gets the dequeue hit ratio. This returns null if there have - * been no attempts to dequeue a task. - * - * @return The dequeue hit ratio, or null if no attempts have - * been made to dequeue a task. - */ - public Double getDequeueHitRatio() { - synchronized (this.getStatisticsMonitor()) { - if ((this.dequeueHitCount + this.dequeueMissCount) == 0) { - return null; - } - double hits = (double) this.dequeueHitCount; - double misses = (double) this.dequeueMissCount; - double total = hits + misses; - return (hits / total); - } - } - - /** - * Call this to increment the number of times dequeue has been called with - * or without a task ready to be dequeued. This function is thread-safe - * with respect to other statistics. - * - * @param hit true if we have a "hit" and there is a task ready - * to be dequeued, otherwise false for a "miss". 
- */ - protected void updateDequeueHitRatio(boolean hit) { - synchronized (this.getStatisticsMonitor()) { - if (hit) { - this.dequeueHitCount++; - } else { - this.dequeueMissCount++; - } - } - } - - /** - * The average time in milliseconds that non-follow-up tasks have taken from - * scheduling until completion. This returns null if no - * non-follow-up tasks have been handled. - * - * @return The average time in milliseconds that non-follow-up tasks have - * taken from scheduling until completion, or null if - * no non-follow-up tasks have been handled. - */ - public Double getAverageTaskTime() { - synchronized (this.getStatisticsMonitor()) { - if (this.taskCompleteCount == 0) { - return null; - } - double totalTime = (double) this.totalTaskTime; - double completeCount = (double) this.taskCompleteCount; - return totalTime / completeCount; - } - } - - /** - * The longest time in milliseconds that a non-follow-up task has taken from - * scheduling until completion. This returns null if no - * non-follow-up tasks have been handled. - * - * @return The longest time in milliseconds that a non-follow-up task has - * taken from scheduling until completion, or null if - * no non-follow-up tasks have been handled. - */ - public Long getLongestTaskTime() { - synchronized (this.getStatisticsMonitor()) { - if (this.longestTaskTime < 0) { - return null; - } - return this.longestTaskTime; - } - } - - /** - * Gets the average number of milliseconds from all task groups to be - * handled from the time first task in the group was scheduled until the last - * task was completed. This returns null if no task groups - * have been completed. - * - * @return The average number of milliseconds from all task groups to be - * handled from the time first task in the group was scheduled until - * the last task was completed, or null if no task - * groups have been completed. 
- */ - public Double getAverageTaskGroupTime() { - synchronized (this.getStatisticsMonitor()) { - if (this.taskGroupCount == 0) { - return null; - } - double totalTime = (double) this.totalTaskGroupTime; - double groupCount = (double) this.taskGroupCount; - return totalTime / groupCount; - } - } - - /** - * Gets the greatest number of milliseconds for a task groups to be handled - * from the time first task in the group was scheduled until the last task - * was completed. This returns null if no task groups have been - * completed. - * - * @return The greatest number of milliseconds for a task groups to be handled - * from the time first task in the group was scheduled until the last - * task was completed, or null if no task groups have - * been completed. - */ - public Long getLongestTaskGroupTime() { - synchronized (this.getStatisticsMonitor()) { - if (this.longestTaskGroupTime < 0L) { - return null; - } - return this.longestTaskGroupTime; - } - } - - /** - * Gets the number of non-follow-up tasks that have been completed. - * - * @return The number of non-follow-up tasks that have been completed. - */ - public long getCompletedTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.taskCompleteCount; - } - } - - /** - * Gets the number of non-follow-up tasks that have been completed - * successfully. - * - * @return The number of non-follow-up tasks that have been completed - * successfully. - */ - public long getSuccessfulTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.taskSuccessCount; - } - } - - /** - * Gets the number of non-follow-up tasks that have been completed - * unsuccessfully (i.e.: with failures). - * - * @return The number of non-follow-up tasks that have been completed - * unsuccessfully (i.e.: with failures). 
- */ - public long getFailedTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.taskFailureCount; - } - } - - /** - * Gets the number of non-follow-up tasks that were aborted. - * - * @return The number of non-follow-up tasks that were aborted. - */ - public long getAbortedTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.taskAbortCount; - } - } - - /** - * Gets the number of follow-up tasks that have been completed. - * - * @return The number of follow-up tasks that have been completed. - */ - public long getCompletedFollowUpCount() { - synchronized (this.getStatisticsMonitor()) { - return this.followUpCompleteCount; - } - } - - /** - * Gets the number of follow-up tasks that have been completed successfully. - * - * @return The number of follow-up tasks that have been completed - * successfully. - */ - public long getSuccessfulFollowUpCount() { - synchronized (this.getStatisticsMonitor()) { - return this.followUpSuccessCount; - } - } - - /** - * Gets the number of follow-up tasks that have been completed unsuccessfully - * (i.e.: with failures). - * - * @return The number of follow-up tasks that have been completed - * successfully (i.e.: with failures). - */ - public long getFailedFollowUpCount() { - synchronized (this.getStatisticsMonitor()) { - return this.followUpFailureCount; - } - } - - /** - * Get the average number of milliseconds spent calling {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} for tasks (both - * follow-up - * and non-follow-up). If no tasks have been handled then null - * is returned. - * - * @return The average number of milliseconds spent calling {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} for tasks, or - * null if no tasks have been handled. 
- */ - public Double getAverageHandleTaskTime() { - synchronized (this.getStatisticsMonitor()) { - if (this.handleCount == 0) { - return null; - } - double totalTime = ((double) this.totalHandlingTime); - double callCount = ((double) this.handleCount); - return totalTime / callCount; - } - } - - /** - * Get the total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called to - * handle tasks (both follow-up and non-follow-up). - * - * @return The total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called - * to handle tasks (both follow-up and non-follow-up). - */ - public long getHandleTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.handleCount; - } - } - - /** - * Get the total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called to - * handle tasks successfully (both follow-up and non-follow-up). - * - * @return The total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called - * to handle tasks successfully (both follow-up and non-follow-up). - */ - public long getSuccessfulHandleTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.handleSuccessCount; - } - } - - /** - * Get the total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called to - * handle tasks unsuccessfully (both follow-up and non-follow-up). - * - * @return The total number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called - * to handle tasks unsuccessfully (both follow-up and non-follow-up). 
- */ - public long getFailedHandleTaskCount() { - synchronized (this.getStatisticsMonitor()) { - return this.handleFailureCount; - } - } - - /** - * Gets the ratio of the number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called to - * handle - * follow-up tasks to the number of times it has been called to handle - * all tasks that have been handled. This returns null - * if no tasks have been handled. - * - * @return The ratio of the number of times {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} has been called - * to handle follow-up tasks to the number of times it has been - * called to handle all tasks that have been handled, or - * null if no tasks have been handled. - */ - public Double getFollowUpHandleTaskRatio() { - synchronized (this.getStatisticsMonitor()) { - if (this.handleCount == 0) { - return null; - } - double followUp = ((double) this.followUpHandleCount); - double all = ((double) this.handleCount); - return followUp / all; - } - } - - /** - * Gets the number of {@link TaskGroup} instances that have been folly - * handled (whether successful or not). - * - * @return The number of {@link TaskGroup} instances that have been folly - * handled (whether successful or not). - */ - public long getCompletedTaskGroupCount() { - synchronized (this.getStatisticsMonitor()) { - return this.taskGroupCount; - } - } - - /** - * Gets the number of task groups that have been successfully completed. - * - * @return The number of task groups that have been successfully completed. - */ - public long getSuccessfulTaskGroupCount() { - synchronized (this.getStatisticsMonitor()) { - return this.groupSuccessCount; - } - } - - /** - * Gets the number of task groups that have been completed with failures. - * - * @return The number of task groups that have been completed with failures. 
- */ - public long getFailedTaskGroupCount() { - synchronized (this.getStatisticsMonitor()) { - return this.groupFailureCount; - } - } - - /** - * The greatest number of tasks in the completed task groups. This returns - * null if no task groups have been completed. - * - * @return The greatest number of tasks in the completed task groups, or - * null if no task groups have been completed. - */ - public Integer getGreatestTaskGroupSize() { - synchronized (this.getStatisticsMonitor()) { - if (this.greatestGroupSize <= 0) { - return null; - } - return this.greatestGroupSize; - } - } - - /** - * Gets the ratio of the total handling time across all threads to the - * total active handling of the task scheduler to indicate the level - * of parallelism achieved. This returns null if no tasks have - * yet been handled. - * - * @return The ratio of the total handling time across all threads to the - * total active handling time of the task scheduler, or - * null if no tasks have been handled. - */ - public Double getParallelism() { - synchronized (this.getStatisticsMonitor()) { - String timerKey = activelyHandling.toString(); - Long activeTime = this.timers.getElapsedTime(timerKey); - if (activeTime == 0L) { - return null; - } - Double totalTime = (double) this.totalHandlingTime; - return (totalTime / ((double) activeTime)); - } - } + /** + * The number of task groups that have completed with failures. + */ + private long groupFailureCount = 0L; - /** - * Gets the greatest number of tasks that have been postponed. - * - * @return The greatest number of tasks that have been postponed. - */ - public int getGreatestPostponedCount() { - return this.greatestPostponedCount; - } + /** + * The number of {@link ScheduledTask} instances handled. Each + * {@link ScheduledTask} may be backed by multiple duplicate actual {@link Task} + * instances. 
+ */ + private long handleCount = 0L; - /** - * Encapsulates a scheduled {@link Task} and all duplicates of that {@link - * Task} assuming the tasks can be collapsed. - * - */ - protected static class ScheduledTask { /** - * The original backing task ID. + * The number of non-follow-up {@link ScheduledTask} instances handled. Each + * {@link ScheduledTask} may be backed by multiple duplicate actual {@link Task} + * instances. */ - private long origTaskId; + private long standardHandleCount = 0L; /** - * Flag indicating if this contains follow-up tasks or non-follow-up - * tasks. + * The number of follow-up {@link ScheduledTask} instances handled. Each + * {@link ScheduledTask} may be backed by multiple duplicate actual {@link Task} + * instances. */ - private boolean followUp; + private long followUpHandleCount = 0L; /** - * The external follow-up ID to reference the task in persistent storage. + * The number of {@link ScheduledTask} instances handled successfully. Each + * {@link ScheduledTask} may be backed by multiple duplicate actual {@link Task} + * instances. */ - private String followUpId; + private long handleSuccessCount = 0L; /** - * The follow-up multiplicity since the follow-up tasks lack backing tasks. + * The number of {@link ScheduledTask} instances handled unsuccessfully. Each + * {@link ScheduledTask} may be backed by multiple duplicate actual {@link Task} + * instances. */ - private Integer multiplicity = null; + private long handleFailureCount = 0L; /** - * The nanosecond when this scheduled task is considered to be expired. + * The greatest task multiplicity encountered for non-follow-up tasks. */ - private Long expirationNanos = null; + private int greatestMultiplicity = 0; /** - * The action associated with the associated tasks. + * The greatest task multiplicity encountered for follow-up tasks. */ - private String action; + private int greatestFollowUpMultiplicity = 0; /** - * The parameters for the associated tasks. 
+ * The total number of times an attempt was made to dequeue a message and one + * was ready. */ - private SortedMap parameters; + private long dequeueHitCount = 0L; /** - * The resource keys for the associated tasks. + * The total number of times an attempt was made to dequeue a message and one + * was not ready to be dequeued. */ - private SortedSet resourceKeys; + private long dequeueMissCount = 0L; /** - * The {@link List} of duplicate {@link Task} instances. + * The greatest number of info messages that are postponed at any one time. */ - private List backingTasks; + private int greatestPostponedCount = 0; /** - * The signature for the tasks. + * The object used for synchronization when working with statistics. */ - private String signature; + private final Object statsMonitor = new Object(); /** - * Flag indicating if this instance allows collapsing duplicate tasks. + * The processing {@link Timers}. */ - private boolean allowCollapse = false; + private final Timers timers = new Timers(); /** - * Flag indicating if the task has succeeded. + * Flag to use to suppress checking if already handling tasks when backgrounding + * task handling. */ - private Boolean successful = null; + private static final ThreadLocal SUPPRESS_HANDLING_CHECK = new ThreadLocal<>(); /** - * The {@link LockToken} for the resources that are locked for this task. + * Default constructor. */ - private LockToken lockToken = null; + protected AbstractSchedulingService() { + this.taskHandler = null; + this.lockingService = null; + this.state = UNINITIALIZED; + this.taskTypeIndex = 0; + this.taskTypeOrder = Arrays.asList(TaskType.values()); + } /** - * Constructs with the first backing actual {@link Task}. + * Gets the {@link State} of this instance. * - * @param task The actual {@link Task} that will back this instance. + * @return The {@link State} of this instance. 
*/ - public ScheduledTask(Task task) { - this.origTaskId = task.getTaskId(); - this.followUp = task.getTaskGroup() == null; - this.backingTasks = new LinkedList<>(); - this.action = task.getAction(); - this.parameters = task.getParameters(); - this.resourceKeys = task.getResourceKeys(); - this.signature = task.getSignature(); - this.allowCollapse = task.isAllowingCollapse(); - this.lockToken = null; - this.successful = null; - this.expirationNanos = null; - task.markScheduled(); - this.backingTasks.add(task); + public synchronized State getState() { + return this.state; } /** - * Gets the task ID of the original backing task with which this instance - * was constructed. + * Provides a means to set the {@link State} for this instance as a synchronized + * method that will notify all upon changing the state. * - * @return The task ID of the original backing task with which this - * instance was constructed. + * @param state The {@link State} for this instance. */ - public long getOriginalBackingTaskId() { - return this.origTaskId; + protected synchronized void setState(State state) { + Objects.requireNonNull(state, "State cannot be null"); + this.state = state; + this.notifyAll(); } /** - * Overridden to return a diagnostic {@link String} describing this - * instance. + * Checks if this instance is current handling tasks. This is used to + * synchronize destruction. The {@link #doDestroy()} method is not called until + * task handling ceases. * - * @return A diagnostic {@link String} describing this instance. 
- */ - @Override - public String toString() { - JsonObjectBuilder job = Json.createObjectBuilder(); - JsonUtilities.add(job, "originalBackingTaskId", - this.getOriginalBackingTaskId()); - JsonUtilities.add(job, "signature", this.getSignature()); - JsonUtilities.add(job, "allowCollapse", this.isAllowingCollapse()); - JsonUtilities.add(job, "multiplicity", this.getMultiplicity()); - JsonUtilities.add(job, "followUp", this.isFollowUp()); - if (this.expirationNanos != null) { - JsonUtilities.add(job, "followUpId", this.getFollowUpId()); - long deltaNanos = this.expirationNanos - System.nanoTime(); - long deltaMillis = deltaNanos / ONE_MILLION; - long expireMillis = System.currentTimeMillis() + deltaMillis; - Date expireDate = new Date(expireMillis); - JsonUtilities.add(job, "expiration", expireDate.toString()); - } - JsonUtilities.add(job, "successful", this.isSuccessful()); - JsonUtilities.add(job, "action", this.getAction()); - job.add("parameters", - JsonUtilities.toJsonObjectBuilder(this.getParameters())); - JsonArrayBuilder jab = Json.createArrayBuilder(); - for (ResourceKey resourceKey : this.getResourceKeys()) { - jab.add(resourceKey.toString()); - } - job.add("resources", jab); - return "\n" + JsonUtilities.toJsonText(job.build()); - } - - /** - * Constructor for deserializing a follow-up task from persistent storage. - * - * @param jsonText The serialized JSON representation of the - * follow-up task. - * @param followUpId The optional external persistence ID for - * the follow-up - * task so it can be later marked complete in - * and deleted - * from persistent storage. - * @param multiplicity The collapsed multiplicity from persistent - * storage, - * which may be one (1) if the follow-up task - * did not - * allow collapsing with duplicate tasks. - * @param expirationTime The millisecond UTC time since then epoch - * when the - * follow-up task is considered to be - * "expired". 
- * @param elapsedMillisSinceCreation The number of milliseconds that have - * elapsed since the deserialized task - * was originally created, or - * null if unknown. - */ - public ScheduledTask(String jsonText, - String followUpId, - int multiplicity, - long expirationTime, - long elapsedMillisSinceCreation) { - this(Task.deserialize(jsonText, - false, - elapsedMillisSinceCreation)); - - this.followUp = true; - this.followUpId = followUpId; - this.allowCollapse = false; - this.multiplicity = multiplicity; - - // determine the expiration in a consistent manner - long now = System.currentTimeMillis(); - long remainingNanos = (expirationTime - now) * ONE_MILLION; - this.expirationNanos = System.nanoTime() + remainingNanos; - } - - /** - * Checks if the actual tasks backing this instance are follow-up tasks. - * Either all the tasks are follow-up tasks or all are not - * follow-up tasks. - * - * @return true if the tasks are follow-up tasks, otherwise + * @return true if this instance is still handling tasks, otherwise * false. + * */ - public boolean isFollowUp() { - return this.followUp; + protected synchronized boolean isHandlingTasks() { + return this.handlingTasks; } /** - * This method always returns false if not a follow-up task. - * If this is a follow-up task then this returns true if the - * follow-up task is expired, otherwise false. + * The {@link Object} to synchronize on when computing and recording statistics + * in a thread-safe manner. * - * @return true if this is an expired follow-up task, otherwise - * false. + * @return The {@link Object} to synchronize on when computing and recording + * statistics in a thread-safe manner. 
*/ - public boolean isFollowUpExpired() { - if (this.expirationNanos == null) { - return false; - } - return System.nanoTime() > this.expirationNanos; + protected final Object getStatisticsMonitor() { + return this.statsMonitor; } /** - * Updates the expiration time to the specified number of milliseconds - * since the epoch in UTC time coordinates. + * Gets the concurrency of the scheduler -- this is the number of threads it + * will use to handle tasks. The returned value will be a positive number + * greater than or equal to one (1). * - * @param expiration The expiration time in number of milliseconds since - * the epoch in UTC time coordinates. + * @return The concurrency of the scheduler (i.e.: the number of threads it will + * use to handle tasks). */ - public void setFollowUpExpiration(long expiration) { - // determine the expiration in a consistent manner - long now = System.currentTimeMillis(); - long remainingNanos = (expiration - now) * ONE_MILLION; - this.expirationNanos = System.nanoTime() + remainingNanos; + public int getConcurrency() { + return this.concurrency; } /** - * Obtains the external ID used to identify the deserialized follow-up - * task in persistent storage. This should always return null - * if {@link #isFollowUp()} is false. This may return - * null if {@link #isFollowUp()} is true if the - * external persistent storage mechanism does not require an external ID. + * Gets the default concurrency with which to initialize if one is not specified + * in the initialization configuration via the {@link #CONCURRENCY_KEY} + * initialization parameter. By default, this returns + * {@link #DEFAULT_CONCURRENCY}, but it may be overridden to return something + * more sensible for a derived implementation. + * + * @return The default concurrency with which to initialize. * - * @return The external ID used to identify the deserialized follow-up - * task in persistent storage. 
+ * @see #getConcurrency() + * @see #CONCURRENCY_KEY + * @see #DEFAULT_CONCURRENCY */ - public String getFollowUpId() { - return this.followUpId; + public int getDefaultConcurrency() { + return DEFAULT_CONCURRENCY; } /** - * Removes all backing tasks that have been flagged as aborted and - * returns the remaining number of backing tasks. If no backing tasks - * remain then this {@link ScheduledTask} should itself be aborted. + * Gets the number of milliseconds to sleep between checks on the locks required + * for tasks that have been postponed due to contention. This timeout is used + * when there are pending tasks that have been postponed due to contention. * - * @return The number of backing tasks that were removed because they were - * aborted. + * @return The number of milliseconds to sleep between checks on the locks + * required for tasks that have been postponed due to contention. */ - public synchronized int removeAborted() { - if (this.isFollowUp()) { - return 0; - } - - int removedCount = 0; - Iterator iter = this.backingTasks.iterator(); - while (iter.hasNext()) { - // get the next task - Task task = iter.next(); + public long getPostponedTimeout() { + return this.postponedTimeout; + } - // get the task group, not a follow-up so we should always have one - TaskGroup group = task.getTaskGroup(); + /** + * Gets the default postponed timeout with which to initialize if one is not + * specified in the initialization configuration via the + * {@link #POSTPONED_TIMEOUT_KEY} initialization parameter. By default, this + * returns {@link #DEFAULT_POSTPONED_TIMEOUT}, but it may be overridden to + * return something more sensible for a derived implementation. + * + * @return The default postponed timeout with which to initialize. 
+ * + * @see #getPostponedTimeout() + * @see #POSTPONED_TIMEOUT_KEY + * @see #DEFAULT_POSTPONED_TIMEOUT + */ + public long getDefaultPostponedTimeout() { + return DEFAULT_POSTPONED_TIMEOUT; + } - // check if the group is fast-fail, if not then no abort - if (!group.isFastFail()) { - continue; - } + /** + * Gets the number of milliseconds to delay before attempting to execute a + * follow-up task. This delay is used to give the opportunity to receive + * duplicate follow-up tasks that can be collapsed. Whenever a duplicate is + * collapsed, the delay timer starts over unless the + * {@linkplain #getFollowUpTimeout() maximum follow-up deferral time} has been + * reached. + * + * @return The number of milliseconds to delay before attempting to execute a + * follow-up task. + */ + public long getFollowUpDelay() { + return this.followUpDelay; + } - // check if the group has failed - if (group.getState() == TaskGroup.State.FAILED) { - // mark the task as aborted - task.aborted(); + /** + * Gets the default follow-up delay with which to initialize if one is not + * specified in the initialization configuration via the + * {@link #FOLLOW_UP_DELAY_KEY} initialization parameter. By default, this + * returns {@link #DEFAULT_FOLLOW_UP_DELAY}, but it may be overridden to return + * something more sensible for a derived implementation. + * + * @return The default follow-up delay with which to initialize. + * + * @see #getFollowUpDelay() + * @see #FOLLOW_UP_DELAY_KEY + * @see #DEFAULT_FOLLOW_UP_DELAY + */ + public long getDefaultFollowUpDelay() { + return DEFAULT_FOLLOW_UP_DELAY; + } - // we have a fast-fail group that is marked as failed - iter.remove(); // remove the aborted task - removedCount++; - } - } + /** + * The maximum number of milliseconds to defer a follow-up task. Once a + * follow-up task has been deferred this number of milliseconds it will no + * longer be purposely delayed to wait for additional duplicates to be scheduled + * and collapsed. 
It may be delayed because of a lack of resources to handle it. + * + * @return The maximum number of milliseconds to defer a follow-up task. + */ + public long getFollowUpTimeout() { + return this.followUpTimeout; + } - // return the number of removed tasks - return removedCount; + /** + * Gets the default follow-up timeout with which to initialize if one is not + * specified in the initialization configuration via the + * {@link #FOLLOW_UP_TIMEOUT_KEY} initialization parameter. By default, this + * returns {@link #DEFAULT_FOLLOW_UP_TIMEOUT}, but it may be overridden to + * return something more sensible for a derived implementation. + * + * @return The default follow-up timeout with which to initialize. + * + * @see #getFollowUpTimeout() + * @see #FOLLOW_UP_TIMEOUT_KEY + * @see #DEFAULT_FOLLOW_UP_TIMEOUT + */ + public long getDefaultFollowUpTimeout() { + return DEFAULT_FOLLOW_UP_TIMEOUT; } /** - * Gets the action for the backing tasks for this instance. + * Gets the number of milliseconds to lease follow-up messages for handling + * before they become available to be obtained again. The default implementation + * returns twice the {@linkplain #getFollowUpTimeout() follow-up timeout}. * - * @return The action for the backing tasks for this instance. + * @return The number of milliseconds to lease follow-up messages for handling + * before they become available to be obtained again. */ - public String getAction() { - return this.action; + public long getFollowUpLeaseTime() { + return this.getFollowUpTimeout() * 2; } /** - * Gets the unmodifiable {@link Map} describing the parameters for - * the backing tasks for this instance. + * The configured maximum number of follow-up tasks to retrieve from persistent + * search on a single retrieval. 
The retrieved follow-up tasks should be handled + * within the {@linkplain #getFollowUpTimeout() follow-up timeout} and so this + * number should not be so large that the tasks are not handled or their + * retrieval is renewed within the allotted time. * - * @return The unmodifiable {@link Map} describing the parameters - * for the backing tasks for this instance. + * @return The configured maximum number of follow-up tasks to retrieve from + * persistent storage on a single retrieval. */ - public SortedMap getParameters() { - return this.parameters; + public int getFollowUpFetchCount() { + return this.followUpFetch; } /** - * Gets the unmodifiable {@link Set} containing the {@link - * ResourceKey} instances identifying the resources for the backing tasks - * for this instance. + * Gets the default follow-up fetch count with which to initialize if one is not + * specified in the initialization configuration via the + * {@link #FOLLOW_UP_FETCH_KEY} initialization parameter. By default, this + * returns {@link #DEFAULT_FOLLOW_UP_FETCH}, but it may be overridden to return + * something more sensible for a derived implementation. * - * @return The unmodifiable {@link Set} containing the {@link - * ResourceKey} instances identifying the resources for the backing - * tasks for this instance. + * @return The default follow-up fetch count with which to initialize. + * + * @see #getFollowUpFetchCount() + * @see #FOLLOW_UP_FETCH_KEY + * @see #DEFAULT_FOLLOW_UP_FETCH */ - public SortedSet getResourceKeys() { - return this.resourceKeys; + public int getDefaultFollowUpFetchCount() { + return DEFAULT_FOLLOW_UP_FETCH; } /** - * Gets the {@link List} of backing tasks associated with the scheduled - * task. + * Gets the number of milliseconds to sleep between checking to see if task + * handling should cease. This timeout is used when there are no postponed tasks + * due to contention. 
* - * @return The {@link List} of {@link Task} instances describing the - * backing tasks for this instance. + * @return The number of milliseconds to sleep between checking to see if task + * handling should cease. This timeout is used when there are no + * postponed tasks due to contention. */ - public List getBackingTasks() { - if (this.backingTasks == null) { - return null; - } else { - return Collections.unmodifiableList(this.backingTasks); - } + public long getStandardTimeout() { + return this.standardTimeout; } /** - * Merges the specified {@link Task} with the other backing tasks of this - * instance. + * Gets the default standard timeout with which to initialize if one is not + * specified in the initialization configuration via the + * {@link #STANDARD_TIMEOUT_KEY} initialization parameter. By default, this + * returns {@link #DEFAULT_STANDARD_TIMEOUT}, but it may be overridden to return + * something more sensible for a derived implementation. * - * @param task The {@link Task} to merge. + * @return The default standard timeout with which to initialize. + * + * @see #getStandardTimeout() + * @see #STANDARD_TIMEOUT_KEY + * @see #DEFAULT_STANDARD_TIMEOUT */ - public void collapseWith(Task task) { - // check if one the tasks does not allow collapse - if (!this.isAllowingCollapse() || !task.isAllowingCollapse()) { - throw new UnsupportedOperationException( - "Cannot collapse specified task (" + task + ") with this task (" - + this.backingTasks.get(0) + ") because at least one does not " - + "allow collapse."); - } + public long getDefaultStandardTimeout() { + return DEFAULT_STANDARD_TIMEOUT; + } - // check if the task signatures do not match - if (!this.getSignature().equals(task.getSignature())) { - throw new IllegalArgumentException( - "Cannot collapse the specified task (" + task + ") with this task (" - + this.backingTasks.get(0) + ") because they are not duplicates."); - } + /** + * Gets the {@link TaskHandler} for this instance. 
+ * + * @return The {@link TaskHandler} for this instance. + */ + @Override + public TaskHandler getTaskHandler() { + return this.taskHandler; + } - // add the backing tasks - task.markScheduled(); - this.backingTasks.add(task); + /** + * Sets the {@link TaskHandler} for this instance. + * + * @param taskHandler The {@link TaskHandler} for this instance. + */ + protected void setTaskHandler(TaskHandler taskHandler) { + this.taskHandler = taskHandler; } /** - * Gets the signature for the backing {@link Task} for this instance. + * Gets the {@link LockingService} for this instance. * - * @return The signature for the backing {@link Task} for this instance. + * @return The {@link LockingService} for this instance. */ - public String getSignature() { - return this.signature; + @Override + public LockingService getLockingService() { + return this.lockingService; } /** - * Checks whether the backing tasks allow collapsing duplicate tasks. + * Sets the {@link LockingService} for this instance. * - * @return true if the duplicate tasks can be collapsed with - * the backing task from this instance, and false if - * collapse is not allowed. + * @param lockingService The {@link LockingService} for this instance. */ - public boolean isAllowingCollapse() { - return this.allowCollapse; + protected void setLockingService(LockingService lockingService) { + this.lockingService = lockingService; } /** - * Gets the number of duplicate tasks identical to this one that were - * scheduled prior to the task being handled. + * Gets the default {@link LockingService} class name with which to initialize + * the backing {@link LockingService} if one is not specified in the + * initialization configuration via the {@link #LOCKING_SERVICE_CLASS_KEY} + * initialization parameter. By default, this returns the + * {@link #DEFAULT_LOCKING_SERVICE_CLASS_NAME}, but it may be overridden to + * return something more sensible for a derived implementation. 
+ * + * @return The default {@link LockingService} class name with which to + * initialize. * - * @return The number of duplicate tasks like + * @see #initLockingService(JsonObject) + * @see #getDefaultLockingServiceConfig() + * @see #LOCKING_SERVICE_CLASS_KEY + * @see #LOCKING_SERVICE_CONFIG_KEY + * @see #DEFAULT_LOCKING_SERVICE_CLASS_NAME */ - public int getMultiplicity() { - if (this.multiplicity != null) { - return this.multiplicity; - } else { - return this.backingTasks.size(); - } + public String getDefaultLockingServiceClassName() { + return DEFAULT_LOCKING_SERVICE_CLASS_NAME; } /** - * Marks all the backing tasks to transition to the {@link - * Task.State#STARTED} state via {@link Task#beginHandling()}. + * Gets the default {@link JsonObject} configuration with which to initialize + * the backing {@link LockingService} if one is not specified in the + * initialization configuration via the {@link #LOCKING_SERVICE_CONFIG_KEY} + * initialization parameter. By default, this returns the null, but + * it may be overridden to return something more sensible for a derived + * implementation. + * + * @return The default {@link JsonObject} configuration with which to initialize + * the backing {@link LockingService}. + * + * @see #initLockingService(JsonObject) + * @see #getDefaultLockingServiceClassName() + * @see #LOCKING_SERVICE_CLASS_KEY + * @see #LOCKING_SERVICE_CONFIG_KEY + * @see #DEFAULT_LOCKING_SERVICE_CLASS_NAME */ - public void beginHandling() { - this.backingTasks.forEach((task) -> { - task.beginHandling(); - }); + public JsonObject getDefaultLockingServiceConfig() { + return null; } /** - * Marks this instance and the backing tasks as having succeeded. 
+ * {@inheritDoc} */ - public void succeeded() { - this.successful = Boolean.TRUE; - this.backingTasks.forEach((task) -> { - task.succeeded(); - }); + @Override + public Scheduler createScheduler(boolean followUp) { + if (followUp) { + return new DefaultScheduler(this); + } else { + TaskGroup taskGroup = new TaskGroup(); + return new DefaultScheduler(this, taskGroup); + } } /** - * Marks this instance and the backing tasks as having failed. + * Creates a {@link Scheduler} for creating follow-up tasks to the specified + * {@link ScheduledTask} unless follow-up tasks are not supported for the + * specified {@link ScheduledTask}. The default implementation will always + * create a {@link DefaultScheduler} that will not have an associated + * {@link TaskGroup}. * - * @param failure The exception that occurred. + * @param task The {@link ScheduledTask} for which to create the follow-up + * scheduler. + * @return The follow-up {@link Scheduler} or null if follow-up + * tasks are not allowed for the specified {@link ScheduledTask}. */ - public void failed(Exception failure) { - this.successful = Boolean.FALSE; - this.backingTasks.forEach((task) -> { - task.failed(failure); - }); + protected Scheduler createFollowUpScheduler(ScheduledTask task) { + // create a follow-up scheduler + return new DefaultScheduler(this); } /** - * Checks if this {@link ScheduledTask} has been flagged as successful. - * This returns null if the {@link ScheduledTask} has not - * yet been handled, otherwise it returns {@link Boolean#TRUE} or {@link - * Boolean#FALSE}. + * Schedules the tasks in the specified {@link List}. + * + * @param tasks The {@link List} of {@link Task} instances. * - * @return {@link Boolean#TRUE} if successful, {@link Boolean#FALSE} if - * unsuccessful, and null if not yet completed. + * @throws ServiceExecutionException If a failure occurs in scheduling the + * tasks. 
If a failure occurs then it should + * be assumed that the tasks will not be + * handled and the associated message should + * be retried later. */ - public Boolean isSuccessful() { - return this.successful; + protected void scheduleTasks(List tasks) throws ServiceExecutionException { + synchronized (this) { + State state = this.getState(); + if (state != READY && state != ACTIVE) { + throw new IllegalStateException( + "Cannot schedule tasks if not in the " + READY + " or " + ACTIVE + " state: " + state); + } + } + + // loop through the tasks + for (Task task : tasks) { + synchronized (this) { + // get the task group + TaskGroup taskGroup = task.getTaskGroup(); + + // check if this is a follow-up task + if (taskGroup == null) { + logDebug("ENQUEUEING FOLLOW-UP TASK: ", task); + + // enqueue the follow-up task for later retrieval + this.enqueueFollowUpTask(task); + + // notify all that a new follow-up task was enqueued + this.notifyAll(); + continue; + } + + // get the task signature + String signature = task.getSignature(); + + // check if the specified task allows collapse + if (task.isAllowingCollapse()) { + // check for existing tasks by the same signature + ScheduledTask scheduledTask = this.taskCollapseLookup.get(signature); + if (scheduledTask != null) { + logDebug("SCHEDULING TASK: ", task, "COLLAPSING WITH: ", scheduledTask); + + // simply collapse with the existing scheduled task + scheduledTask.collapseWith(task); + + } else { + // create a scheduled task and add to the pending queue + scheduledTask = new ScheduledTask(task); + logDebug("SCHEDULING TASK: ", task); + this.pendingTasks.add(scheduledTask); + this.taskCollapseLookup.put(signature, scheduledTask); + } + + } else { + // the specified task cannot be collapsed with another + logDebug("SCHEDULING NON-COLLAPSING TASK: ", task); + ScheduledTask scheduledTask = new ScheduledTask(task); + this.pendingTasks.add(scheduledTask); + } + + // for good measure notify all that a new task was scheduled + 
this.notifyAll(); + } + } } /** - * Acquires the locks on the resources required for this instance. If - * no locks are required this simply returns true. + * Dequeues a previously enqueued {@link ScheduledTask}. * - * @param lockingService The {@link LockingService} to use. + * @return The {@link ScheduledTask} that was dequeued. + */ + protected synchronized ScheduledTask dequeueTask() { + this.timerPause(dequeueBlocking); + this.timerStart(dequeueTaskWaitLoop); + + // set the hit flag to true + boolean hit = true; + + int prevPendingCount = -1; + int prevPostponedCount = -1; + + // wait for a task to be available + while (this.getState().isAvailable() && (this.pendingTasks.size() == 0) && (!this.isFollowUpReadyCheckTime()) + && (!this.isPostponedReadyCheckTime())) { + // if we get here then no task was ready so we have a miss + hit = false; + + // toggle the timers + this.toggleActiveAndWaitingTimers(this.pendingTasks.size(), this.postponedTasks.size(), + this.workerPool.isBusy()); + + // determine if postponed tasks exist + boolean postponed = (this.getPostponedTaskCount() > 0); + + // determine how long to wait + long timeout = (postponed) ? Math.min(this.getPostponedTimeout(), this.getStandardTimeout()) + : this.getStandardTimeout(); + + // wait for the designated duration + this.timerStart(dequeueTaskWait); + try { + logDebug("SLEEPING BEFORE RETRIEVING " + (postponed ? 
"POSTPONED" : "FOLLOW-UP") + " TASK: " + timeout); + this.wait(timeout); + + } catch (InterruptedException ignore) { + // ignore the interruption + } finally { + this.timerPause(dequeueTaskWait); + } + } + this.timerPause(dequeueTaskWaitLoop); + + // grab a postponed task if available + ScheduledTask task = null; + TaskType taskType = null; + int taskTypeCount = this.taskTypeOrder.size(); + for (int index = 0; index < taskTypeCount && task == null; index++) { + taskType = this.taskTypeOrder.get(this.taskTypeIndex++); + this.taskTypeIndex = this.taskTypeIndex % taskTypeCount; + switch (taskType) { + case PENDING: + this.timerStart(checkPending); + try { + task = this.getReadyPendingTask(); + } catch (Exception e) { + logWarning(e, "FAILED TO OBTAIN A TASK FROM THE PENDING QUEUE"); + } finally { + this.timerPause(checkPending); + } + break; + + case POSTPONED: + this.timerStart(checkPostponed); + try { + task = this.getReadyPostponedTask(); + } catch (Exception e) { + logWarning(e, "FAILED TO OBTAIN A POSTPONED TASK, " + "DEFERRING POSTPONED TASKS FOR NOW"); + } finally { + this.timerPause(checkPostponed); + } + break; + + case FOLLOW_UP: + this.timerStart(checkFollowUp); + try { + task = this.getReadyFollowUpTask(); + } catch (ServiceExecutionException e) { + logWarning(e, "FAILED TO OBTAIN A FOLLOW-UP TASK, " + "DEFERRING FOLLOW-UP TASKS FOR NOW"); + } finally { + this.timerPause(checkFollowUp); + } + break; + + default: + throw new IllegalStateException("Unrecognized task type: " + taskType); + } + } + + // if not null then return the task + if (task != null) { + // ensure the timers toggled correctly + this.timerPause(waitingOnPostponed, waitingForTasks); + this.timerStart(activelyHandling); + this.updateDequeueHitRatio(hit); + + // update the state + if (this.getState() == READY) { + this.setState(ACTIVE); + } + + // check if we need to remove from the collapse lookup + if (!task.isFollowUp() && task.isAllowingCollapse()) { + ScheduledTask collapse = 
this.taskCollapseLookup.remove(task.getSignature()); + if (task != collapse) { + throw new IllegalStateException("Collapse lookup table did not contain the same task as was " + + "dequeued. expected=[ " + task + " ], actual=[ " + collapse + " ]"); + } + } + + // return the task for handling + return task; + } + + this.toggleActiveAndWaitingTimers(this.pendingTasks.size(), this.postponedTasks.size(), + this.workerPool.isBusy()); + this.updateDequeueHitRatio(false); + + // update the state + if ((this.getState() == ACTIVE) && (this.pendingTasks.size() == 0) && (this.postponedTasks.size() == 0) + && (!this.workerPool.isBusy())) { + // no pending or postponed tasks, no tasks being handled and we have none + // to return the user (e.g.: follow-up tasks), go from ACTIVE to READY + this.setState(READY); + + } else if (this.getState() == READY) { + // we are either busy handling tasks or we have pending or postponed tasks + // and we are in the READY state so transition to ACTIVE + this.setState(ACTIVE); + } + + return null; + } + + /** + * Returns a {@link ScheduledTask} from the pending queue that is ready for + * handling. This method will find the least-recently-scheduled task whose set + * of affected resources (identified by {@link ResourceKey} instances) could be + * locked without blocking and locks those resources. If no such pending task + * could be found then null is returned. * - * @return true if all required locks were obtained, otherwise - * false. + * @return The next pending {@link ScheduledTask} that is now ready to try, or + * null if none are ready to try. 
*/ - public synchronized boolean acquireLocks(LockingService lockingService) { - if (this.lockToken != null) { - return true; - } + protected synchronized ScheduledTask getReadyPendingTask() { + this.timerStart(dequeueCheckLocked); + try { + // if none ready then check if we can grab a pending task + while (this.pendingTasks.size() > 0) { + // get the candidate task + ScheduledTask task = this.pendingTasks.remove(0); + + // check if the task is aborted + if (this.skipIfAborted(task)) { + continue; + } + + // attempt to lock the task resources + this.timerStart(obtainLocks); + boolean locked = task.acquireLocks(this.getLockingService()); + this.timerPause(obtainLocks); + + // if the lock was obtained, return the task + if (locked) { + return task; + } - Set resourceKeys = this.getResourceKeys(); - if (resourceKeys == null || resourceKeys.size() == 0) { - return true; - } + // if not locked then postpone the task + this.postponedTasks.add(task); - try { - this.lockToken = lockingService.acquireLocks(resourceKeys, 0L); + // check the postponed count to see if this is now the greatest + synchronized (this.getStatisticsMonitor()) { + int postponedCount = this.postponedTasks.size(); + if (postponedCount > this.greatestPostponedCount) { + this.greatestPostponedCount = postponedCount; + } + } + // notify all + this.notifyAll(); + } - } catch (ServiceExecutionException e) { - throw new RuntimeException(e); - } + // if we get here then return null + return null; - // check if the lock token is non-null - return (this.lockToken != null); + } finally { + this.timerPause(dequeueCheckLocked); + } } /** - * Releases any locks associated with the backing tasks. + * Returns a previously postponed {@link ScheduledTask} that is now ready to be + * processed. 
If the last time this method was called was less than the + * {@linkplain #getPostponedTimeout() postpone timeout} then this method returns + * null so that the previously postponed tasks are not checked for + * readiness too frequently. Otherwise, this method will find the least recently + * postponed {@link ScheduledTask} whose set of affected resources (identified + * by {@link ResourceKey} instances) are not currently locked. If there are no + * postponed {@link ScheduledTask} instance that meet the readiness criteria, + * then null is returned. * - * @param lockingService The {@link LockingService} with which to release - * the locks. + * @return The next postponed {@link ScheduledTask} that is now ready to try. */ - public synchronized void releaseLocks(LockingService lockingService) { - if (this.lockToken == null) { - return; - } + protected synchronized ScheduledTask getReadyPostponedTask() { + // get the elapsed time and update the timestamp + long now = System.nanoTime(); + long elapsedNanos = now - this.postponedNanoTime; + long elapsedMillis = elapsedNanos / ONE_MILLION; + + // check the timestamp + if (elapsedMillis < this.getPostponedTimeout()) { + return null; + } + + // check if there are no postponed messages + if (this.postponedTasks.size() == 0) { + // since we have checked all the postponed messages (none) and none are + // ready then we need to update the timestamp + this.postponedNanoTime = now; + + return null; + } - Set resourceKeys = this.getResourceKeys(); - if (resourceKeys == null || resourceKeys.size() == 0) { - return; - } + // iterate through the postponed messages + Iterator iter = this.postponedTasks.iterator(); + try { + while (iter.hasNext()) { + ScheduledTask task = iter.next(); + + // handle aborted tasks + if (this.skipIfAborted(task)) { + iter.remove(); + continue; + } - try { - int count = lockingService.releaseLocks(this.lockToken); + // attempt to lock the task resources + this.timerStart(obtainLocks); + boolean locked = 
task.acquireLocks(this.getLockingService()); + this.timerPause(obtainLocks); - this.lockToken = null; + if (locked) { + iter.remove(); + return task; + } + } - if (this.resourceKeys.size() != count) { - throw new IllegalStateException( - "Wrong number of locks released. released=[ " + count - + " ], expected=[ " + this.getResourceKeys().size() + " ]"); + } finally { + // check if we checked all the messages + if (!iter.hasNext()) { + // since we have checked all the postponed messages for readiness we + // can update the timestamp so we don't busy check again and again + this.postponedNanoTime = now; + } } - } catch (ServiceExecutionException e) { - throw new RuntimeException(e); - } + // if we get here without returning a message then return null + return null; } - } - /** - * The encapsulation of the result from the async workers. - */ - protected static class TaskResult { /** - * The {@link ScheduledTask} that was handled. - */ - private ScheduledTask task; + * Removes any aborted backing tasks from the specified {@link ScheduledTask}, + * tracks the aborted count and returns true if the specified + * {@link ScheduledTask} can be fully removed from the queue and ignored (i.e.: + * it has no more backing tasks). If not all backing tasks are aborted, then + * false is returned to indicate the task still needs to be + * handled. + * + * @param task The {@link ScheduledTask} to check if fully aborted and remove + * aborted tasks from. + * @return true if the specified {@link ScheduledTask} should be + * skipped because it is fully aborted, otherwise false. 
+ */ + protected boolean skipIfAborted(ScheduledTask task) { + // remove any aborted tasks + int abortCount = task.removeAborted(); + this.taskAbortCount += abortCount; + + // check if aborted + if (task.getMultiplicity() == 0) { + if (task.isAllowingCollapse()) { + ScheduledTask collapse = this.taskCollapseLookup.get(task.getSignature()); + if (collapse == task) { + this.taskCollapseLookup.remove(task.getSignature()); + } else { + throw new IllegalStateException("Unexpected collapsing task in lookup. expected=[ " + task + + " ], found=[ " + collapse + " ]"); + } + } + return true; + } + + // return false if we get here + return false; + } /** - * The {@link Timers} associated with the handling of the associated task. + * Checks if a check should be performed against the readiness of the postponed + * tasks. This returns true if and only if there is at least one + * postponed task and the readiness check has not been performed within the + * configured postponed timeout. + * + * @return true if it is time to perform a postponed task readiness + * check, otherwise false. */ - private Timers timers; + protected synchronized boolean isPostponedReadyCheckTime() { + // no need to do a ready check if no postponed messages + if (this.postponedTasks.size() == 0) { + return false; + } + + // get the elapsed time and update the timestamp + long now = System.nanoTime(); + long elapsedNanos = now - this.postponedNanoTime; + long elapsedMillis = elapsedNanos / ONE_MILLION; + + // check the timestamp + return (elapsedMillis >= this.getPostponedTimeout()); + } /** - * Constructs with the specified parameters. - * - * @param task The {@link Task} that was handled. - * @param timers The {@link Timers} for handling the task. + * Returns a previously scheduled follow-up {@link ScheduledTask} that is now + * ready to be processed. If the resources that must be locked are not available + * for the follow-up task then it is left on the queue. 
     * method was called was too recent, callers are expected to skip this check
     * (see {@link #isFollowUpReadyCheckTime()}).  This method refills the
     * in-memory follow-up cache via {@link #dequeueFollowUpTasks(int)} when at
     * most one cached task remains, renews soon-to-expire leases via
     * {@link #renewFollowUpTasks(List)}, and then returns the least-recently
     * dequeued follow-up {@link ScheduledTask} whose affected resources
     * (identified by {@link ResourceKey} instances) are not currently locked.
     * If no cached follow-up {@link ScheduledTask} instances meet the readiness
     * criteria then <code>null</code> is returned.
     *
     * @return The next follow-up {@link ScheduledTask} that is now ready to be
     *         handled, or <code>null</code> if there is none.
     * @throws ServiceExecutionException If a failure occurs in obtaining a
     *                                   follow-up task.
     */
    protected synchronized ScheduledTask getReadyFollowUpTask() throws ServiceExecutionException {
        // get the current timestamp
        long now = System.nanoTime();

        // check if the in-memory cache is down to at most one follow-up task
        if (this.followUpTasks.size() <= 1) {
            // we have (almost) no follow-up tasks in the cache, let's get some
            List<ScheduledTask> tasks = this.dequeueFollowUpTasks(this.getFollowUpFetchCount());

            // add the follow-up tasks and schedule the next lease renewal at
            // half the lease time (ONE_MILLION converts millis to nanos)
            this.followUpTasks.addAll(tasks);
            this.followUpRenewNanos = now + ((this.getFollowUpLeaseTime() / 2) * ONE_MILLION);

            // check if we still have no follow-up tasks
            if (this.followUpTasks.size() == 0) {
                // since we have checked all the follow-up tasks (none) and none
                // are ready then we need to update the timestamp
                this.followUpNanoTime = now;
                logDebug("RESET FOLLOW-UP CHECK TIME");

                // return null since there are no follow-up tasks
                return null;
            }
        } else if (now > this.followUpRenewNanos) {
            // collect both in-progress and cached follow-up tasks for renewal
            int size = this.followUpTasks.size() + this.inProgressFollowUpTasks.size();
            List<ScheduledTask> renewList = new ArrayList<>(size);
            renewList.addAll(this.inProgressFollowUpTasks.keySet());
            renewList.addAll(this.followUpTasks);

            // renew the leases on the follow-up tasks
            this.renewFollowUpTasks(renewList);
        }

        // iterate through the cached follow-up tasks
        Iterator<ScheduledTask> iter = this.followUpTasks.iterator();
        try {
            while (iter.hasNext()) {
                // get the next follow-up task
                ScheduledTask task = iter.next();

                // attempt to lock the task's resources
                this.timerStart(obtainLocks);
                boolean locked = task.acquireLocks(this.getLockingService());
                this.timerPause(obtainLocks);

                // if locked, promote the task to in-progress and return it
                if (locked) {
                    iter.remove();
                    this.inProgressFollowUpTasks.put(task, System.nanoTime());
                    return task;
                }
            }

        } finally {
            // check if we checked all the tasks without returning one
            if (!iter.hasNext()) {
                // since we have checked all the follow-up tasks for readiness we
                // can update the timestamp so we don't busy check again and again
                this.followUpNanoTime = now;
                logDebug("RESET FOLLOW-UP CHECK TIME");
            }
        }

        // if we get here without returning a task then return null
        return null;
    }

    /**
     * Checks if a readiness check should be performed against the cached
     * follow-up tasks.  This returns <code>true</code> if and only if at least
     * half of the {@linkplain #getFollowUpDelay() follow-up delay} has elapsed
     * since the last readiness check.
     *
     * @return <code>true</code> if it is time to perform a follow-up task
     *         readiness check, otherwise <code>false</code>.
     */
    protected synchronized boolean isFollowUpReadyCheckTime() {
        // get the elapsed time since the last readiness check
        long now = System.nanoTime();
        long elapsedNanos = now - this.followUpNanoTime;
        long elapsedMillis = elapsedNanos / ONE_MILLION;

        // compare against half the configured follow-up delay
        return (elapsedMillis >= (this.getFollowUpDelay() / 2));
    }

    /**
    /**
     * Enqueues the specified follow-up {@link Task} instance and persists it for
     * future retrieval.  A follow-up {@link Task} does <b>not</b> belong to a
     * {@link TaskGroup} and therefore should have a <code>null</code>
     * {@linkplain Task#getTaskGroup() task group property}.
     *
     * @param task The follow-up {@link Task} to enqueue.
     *
     * @throws IllegalArgumentException If the specified {@link Task} belongs to
     *                                  a {@link TaskGroup}.
     *
     * @throws ServiceExecutionException If a failure occurs in persisting the
     *                                   specified {@link Task}.
     */
    protected abstract void enqueueFollowUpTask(Task task) throws ServiceExecutionException;

    /**
     * Retrieves a number of follow-up tasks from persistent storage.  This should
     * mark the retrieved tasks as pending and should not return them again until
     * at least {@link #getFollowUpTimeout()} milliseconds have passed.
     *
     * @param count The suggested number of follow-up tasks to retrieve from
     *              persistent storage.
     *
     * @return The {@link List} of follow-up {@link ScheduledTask} instances
     *         retrieved from persistent storage.
     *
     * @throws ServiceExecutionException If a failure occurs in retrieving the
     *                                   follow-up tasks.
     */
    protected abstract List<ScheduledTask> dequeueFollowUpTasks(int count) throws ServiceExecutionException;

    /**
     * Renews the leases on the specified follow-up tasks in persistent storage.
     * This should mark the specified tasks as pending and update their
     * expiration timestamps accordingly.  The specified {@link ScheduledTask}
     * instances should be directly modified via
     * {@link ScheduledTask#setFollowUpExpiration(long)}.
     *
     * @param tasks The {@link ScheduledTask} instances for lease renewal.
     *
     * @throws ServiceExecutionException If a failure occurs in renewing the
     *                                   leases on the specified tasks.
     */
    protected abstract void renewFollowUpTasks(List<ScheduledTask> tasks) throws ServiceExecutionException;

    /**
     * Marks the specified follow-up task as complete and removes it from
     * persistent storage so that it is no longer available for dequeue.
     *
     * @param task The {@link ScheduledTask} to be marked as completed.
     *
     * @throws ServiceExecutionException If a failure occurs in removing the
     *                                   specified task from persistent storage.
     */
    protected abstract void completeFollowUpTask(ScheduledTask task) throws ServiceExecutionException;

    /**
     * Calls the {@link #handleTasks()} function in a background thread after
     * validating the current state of this instance and waiting for the
     * {@link TaskHandler} to become ready.
     */
    protected void backgroundHandleTasks() {
        synchronized (this) {
            // check if not in an available (READY or ACTIVE) state
            if (this.getState() != READY && this.getState() != ACTIVE) {
                throw new IllegalStateException("Cannot call backgroundHandleTasks() if not in the " + READY + " or "
                        + ACTIVE + " state. Current state is " + this.getState());
            }

            // check if already handling tasks
            // NOTE(review): the message says handleTasks() though this method is
            // backgroundHandleTasks() -- confirm whether intentional
            if (this.handlingTasks) {
                throw new IllegalStateException(
                        "Cannot call handleTasks() when it has already been called and is " + "still handling tasks.");
            }

            // set the handling tasks flag
            this.handlingTasks = true;

            // verify the handling thread is null
            if (this.taskHandlingThread != null) {
                throw new IllegalStateException("Task handling thread seems to already exist.");
            }

            // create the thread
            this.taskHandlingThread = new Thread(() -> {
                TaskHandler taskHandler = this.getTaskHandler();
                Boolean ready = null;
                int count = 0;

                try {
                    // repeatedly wait until the task handler reports readiness
                    do {
                        if (count > 0) {
                            logInfo("****** STILL WAITING ON TASK HANDLER READINESS");
                        }
                        count++;
                        ready = taskHandler.waitUntilReady(READY_TIMEOUT);
                    } while (FALSE.equals(ready));

                } catch (InterruptedException e) {
                    logWarning("****** INTERRUPTED WHILE WAITING ON TASK HANDLER " + "READINESS");
                    System.err.println(e.getMessage());
                    System.err.println(formatStackTrace(e.getStackTrace()));
                    return;
                }

                // check if ready state indicates a failure (null => failure)
                if (ready == null) {
                    logWarning("****** TASK HANDLER HAS INDICATED A FAILURE PREVENTING " + "READINESS (CHECK LOGS)");
                    return;
                }

                // check if ready state is false (should not get here)
                if (FALSE.equals(ready)) {
                    logWarning("****** TASK HANDLER NEVER BECAME READY TO HANDLE TASKS");
                    return;
                }

                // state was already validated above, so suppress the re-check
                // performed by handleTasks() for this thread
                SUPPRESS_HANDLING_CHECK.set(true);
                this.handleTasks();
            });

            // start the thread
            this.taskHandlingThread.start();
        }
    }

    /**
If the state transitions such that the + * service is no longer {@linkplain State#isAvailable() available} the only + * previously scheduled tasks will be handled before the processing terminates. + * This method does not return until handling of the tasks is complete. + * + */ + protected void handleTasks() { + try { + if (!SUPPRESS_HANDLING_CHECK.get()) { + synchronized (this) { + if (this.getState().isAvailable()) { + throw new IllegalStateException("Cannot call handleTasks() if not in the " + READY + " or " + + ACTIVE + " state. Current state is " + this.getState()); + } + + // check if already handling tasks + if (this.handlingTasks) { + throw new IllegalStateException("Cannot call handleTasks() when it has already been called and " + + "tasks are still being handled."); + } + + // set the handling tasks flag + this.handlingTasks = true; + } + } + + // create the worker pool + synchronized (this) { + this.workerPool = new AsyncWorkerPool<>(this.getConcurrency()); + } + + // start the handling timer + this.timerStart(taskHandling, betweenTasks); + + // loop over the tasks + while (this.getState().isAvailable() || this.getPendingTaskCount() > 0 || this.getPostponedTaskCount() > 0 + || this.getLeasedFollowUpTaskCount() > 0) { + // dequeue a message + this.timerStart(dequeue, dequeueBlocking); + ScheduledTask task = this.dequeueTask(); + this.timerPause(dequeue); + + // check if we have a task + if (task != null) { + this.timerPause(betweenTasks); + this.timerStart(activelyHandling); + + // prep a task reference for the + final ScheduledTask currentTask = task; + final Timers timers = new Timers(); + timers.start(waitForWorker.toString()); + AsyncResult result = this.workerPool.execute(() -> { + try { + // handle the task + timers.start(handleTask.toString()); + currentTask.beginHandling(); + taskHandler.handleTask(currentTask.getAction(), currentTask.getParameters(), + currentTask.getMultiplicity(), this.createFollowUpScheduler(currentTask)); + 
timers.pause(handleTask.toString()); + + // in case of success mark it as handled + timers.start(markComplete.toString()); + currentTask.succeeded(); + timers.pause(markComplete.toString()); + + } catch (Exception e) { + // in case of exception mark it as failed + timers.start(markComplete.toString()); + currentTask.failed(e); + timers.pause(markComplete.toString()); + + } finally { + // remove from persistent store (mark completed) + if (currentTask.isFollowUp()) { + timers.start(completeFollowUp.toString()); + synchronized (this) { + this.inProgressFollowUpTasks.remove(currentTask); + } + this.completeFollowUpTask(currentTask); + timers.pause(completeFollowUp.toString()); + } + + // release any associated locks on the resources + timers.start(releaseLocks.toString()); + currentTask.releaseLocks(this.getLockingService()); + timers.pause(releaseLocks.toString()); + + // record statistics + this.recordStatistics(task, timers); + } + + return new TaskResult(currentTask, timers); + }); + + this.handleAsyncResult(result); + } + this.timerStart(betweenTasks); + } + + // when done, close out the worker pool + try { + // if we get here then all postponed tasks have been handled and we + // are no longer scheduling tasks -- time to wait for completion of + // in-flight tasks so they can be disposed + List> results = this.workerPool.close(); + for (AsyncResult result : results) { + this.handleAsyncResult(result); + } + } finally { + this.timerPause(taskHandling, activelyHandling, waitingForTasks, waitingOnPostponed); + + synchronized (this) { + this.handlingTasks = false; + this.workerPool = null; + this.notifyAll(); + } + } + + } catch (Exception e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + } + } + + /** + * Handles the {@link AsyncResult} from the {@link AsyncWorkerPool} after it is + * received. This extracts the {@link TaskResult} value and traps any exceptions + * (there should be none). 
    /**
     * Handles the {@link AsyncResult} from the {@link AsyncWorkerPool} after it
     * is received.  This extracts the {@link TaskResult} value, traps any
     * exceptions (there should be none) and calls
     * {@link #postProcess(ScheduledTask)} for the handled task.
     *
     * @param result The {@link AsyncResult} to handle, or <code>null</code> if
     *               no result was returned.
     */
    protected void handleAsyncResult(AsyncResult<TaskResult> result) {
        if (result == null) {
            return;
        }
        TaskResult taskResult = null;

        try {
            taskResult = result.getValue();

        } catch (Exception cannotHappen) {
            // exceptions should be logged and consumed during processing and used
            // to determine the disposability of the message/batch.
            logError(cannotHappen, "UNEXPECTED EXCEPTION");
            throw new IllegalStateException(cannotHappen);
        }

        ScheduledTask task = taskResult.getTask();
        this.timerStart(postProcess);
        try {
            this.postProcess(task);
        } finally {
            this.timerPause(postProcess);
        }
    }

    /**
     * This method does nothing, but provides a hook so that it may be overridden
     * to do any special handling on the {@link ScheduledTask} after it has been
     * handled by the {@link TaskHandler}.
     *
     * @param task The {@link ScheduledTask} that was handled.
     */
    protected void postProcess(ScheduledTask task) {
        // do nothing
    }

    /**
     * Records the statistics pertaining to the specified {@link ScheduledTask}
     * using the specified {@link Timers} instance.  If the task has not yet
     * completed (its success flag is <code>null</code>) a warning is logged and
     * nothing is recorded.
     *
     * @param scheduledTask The {@link ScheduledTask} that was completed.
     * @param timers        The {@link Timers} associated with the specified
     *                      {@link ScheduledTask}.
     */
    protected void recordStatistics(ScheduledTask scheduledTask, Timers timers) {
        if (scheduledTask.isSuccessful() == null) {
            logWarning("Statistics recorded for incomplete task: ", scheduledTask);
            return;
        }
        synchronized (this.getStatisticsMonitor()) {
            // increment the handled task count
            this.handleCount++;
            if (scheduledTask.isSuccessful()) {
                this.handleSuccessCount++;
            } else {
                this.handleFailureCount++;
            }

            // check if this task is a follow-up
            boolean followUp = scheduledTask.isFollowUp();
            if (followUp) {
                this.followUpHandleCount++;
            } else {
                this.standardHandleCount++;
            }

            int multiplicity = scheduledTask.getMultiplicity();
            if (followUp) {
                // update the follow-up multiplicity stats
                if (multiplicity > this.greatestFollowUpMultiplicity) {
                    this.greatestFollowUpMultiplicity = multiplicity;
                }
            } else {
                // update the greatest multiplicity
                if (multiplicity > this.greatestMultiplicity) {
                    this.greatestMultiplicity = multiplicity;
                }
            }

            // get the handling time
            String timerKey = handleTask.toString();
            long handlingMillis = timers.getElapsedTime(timerKey);
            this.totalHandlingTime += handlingMillis;
            if (this.longestHandlingTime < handlingMillis) {
                this.longestHandlingTime = handlingMillis;
            }

            // iterate over the backing tasks
            scheduledTask.getBackingTasks().forEach(task -> {
                TaskGroup taskGroup = task.getTaskGroup();

                // if we have a task group then handle group statistics
                if (taskGroup != null) {
                    // group stats are only recorded once, on the concluding task
                    boolean concluding = taskGroup.isConcludingTask(task);
                    if (concluding) {
                        this.taskGroupCount++;
                        int taskCount = taskGroup.getTaskCount();
                        if (this.greatestGroupSize < taskCount) {
                            this.greatestGroupSize = taskCount;
                        }
                        long roundTrip = taskGroup.getRoundTripTime();
                        this.totalTaskGroupTime += roundTrip;
                        if (roundTrip > this.longestTaskGroupTime) {
                            this.longestTaskGroupTime = roundTrip;
                        }
                        TaskGroup.State state = taskGroup.getState();
                        if (state == TaskGroup.State.SUCCESSFUL) {
                            this.groupSuccessCount++;
                        } else if (state == TaskGroup.State.FAILED) {
                            this.groupFailureCount++;
                        }
                    }
                }

                // handle task statistics
                if (followUp) {
                    // since follow-up tasks are collapsed into a single backing
                    // task then we need to add the multiplicity instead
                    this.followUpCompleteCount += multiplicity;
                    switch (task.getState()) {
                        case SUCCESSFUL:
                            this.followUpSuccessCount += multiplicity;
                            break;
                        case FAILED:
                            this.followUpFailureCount += multiplicity;
                            break;
                        default:
                            logWarning("UNEXPECTED POST-COMPLETION TASK STATE: " + task.getState(), task);
                    }
                } else {
                    this.taskCompleteCount++;
                    switch (task.getState()) {
                        case SUCCESSFUL:
                            this.taskSuccessCount++;
                            break;
                        case FAILED:
                            this.taskFailureCount++;
                            break;
                        default:
                            logWarning("UNEXPECTED POST-COMPLETION TASK STATE: " + task.getState(), task);
                    }

                    long taskTime = task.getRoundTripTime();
                    if (this.longestTaskTime < taskTime) {
                        this.longestTaskTime = taskTime;
                    }
                    this.totalTaskTime += taskTime;
                }
            });
        }
    }

    /**
     * Gets the number of queued tasks that are pending.
     *
     * @return The number of pending tasks.
     */
    protected synchronized int getPendingTaskCount() {
        return this.pendingTasks.size();
    }

    /**
     * Gets the number of postponed tasks.
     *
     * @return The number of postponed tasks.
     */
    protected synchronized int getPostponedTaskCount() {
        return this.postponedTasks.size();
    }

    /**
     * Gets the number of follow-up tasks cached in memory.
     *
     * @return The number of follow-up tasks cached in memory.
     */
    protected synchronized int getLeasedFollowUpTaskCount() {
        return this.followUpTasks.size();
    }

    /**
    /**
     * Default implementation of
     * {@link SchedulingService#init(JsonObject, TaskHandler)} that initializes
     * the base properties and then calls {@link #doInit(JsonObject)} to complete
     * the configuration.  This implementation ensures that it is called in the
     * {@link State#UNINITIALIZED} state and that the service transitions to the
     * {@link State#READY} state (and begins background task handling) at its
     * conclusion.
     *
     * @param config      The {@link JsonObject} describing the configuration.
     * @param taskHandler The {@link TaskHandler} to use for handling tasks.
     * @throws ServiceSetupException If a failure occurs.
     */
    @Override
    public void init(JsonObject config, TaskHandler taskHandler) throws ServiceSetupException {
        Objects.requireNonNull(taskHandler, "The specified TaskHandler cannot be null");
        synchronized (this) {
            if (this.getState() != UNINITIALIZED) {
                throw new IllegalStateException(
                        "Cannot initialize if not in the " + UNINITIALIZED + " state: " + this.getState());
            }
            this.timerStart(initialize);
            this.setState(INITIALIZING);
        }

        try {
            synchronized (this) {
                // default to an empty JSON object if null
                if (config == null) {
                    config = Json.createObjectBuilder().build();
                }

                this.lockingService = this.initLockingService(config);
                this.setTaskHandler(taskHandler);

                // get the concurrency (worker pool size)
                this.concurrency = getConfigInteger(config, CONCURRENCY_KEY, 1, this.getDefaultConcurrency());

                // get the postponed timeout
                this.postponedTimeout = getConfigLong(config, POSTPONED_TIMEOUT_KEY, 0L,
                        this.getDefaultPostponedTimeout());

                // get the standard timeout
                this.standardTimeout = getConfigLong(config, STANDARD_TIMEOUT_KEY, 0L,
                        this.getDefaultStandardTimeout());

                // get the follow-up delay
                this.followUpDelay = getConfigLong(config, FOLLOW_UP_DELAY_KEY, 0L, this.getDefaultFollowUpDelay());

                // get the follow-up timeout
                this.followUpTimeout = getConfigLong(config, FOLLOW_UP_TIMEOUT_KEY, 0L,
                        this.getDefaultFollowUpTimeout());

                // get the follow-up fetch count
                this.followUpFetch = getConfigInteger(config, FOLLOW_UP_FETCH_KEY, 1,
                        this.getDefaultFollowUpFetchCount());

                // check that the follow-up timeout is at least the follow-up delay
                if (this.followUpTimeout < this.followUpDelay) {
                    throw new ServiceSetupException("The configured value for " + FOLLOW_UP_TIMEOUT_KEY + " ("
                            + this.followUpTimeout + ") cannot be less than the " + "configured value for "
                            + FOLLOW_UP_DELAY_KEY + " (" + this.followUpDelay + ").");
                }

                // create the queues
                this.pendingTasks = new LinkedList<>();
                this.postponedTasks = new LinkedList<>();
                this.followUpTasks = new LinkedList<>();
                this.inProgressFollowUpTasks = new IdentityHashMap<>();
                this.taskCollapseLookup = new LinkedHashMap<>();
            }

            // defer additional configuration
            this.doInit(config);

            // set to the ready state and begin handling tasks in the background
            this.setState(READY);
            this.backgroundHandleTasks();

        } catch (Exception e) {
            System.err.println(e.getMessage());
            System.err.println(formatStackTrace(e.getStackTrace()));
            this.setState(UNINITIALIZED);
            // NOTE(review): a ServiceSetupException thrown above is wrapped in an
            // unchecked RuntimeException here rather than rethrown as declared
            // -- confirm callers expect this
            throw new RuntimeException(e);

        } finally {
            this.timerPause(initialize);
        }
    }

    /**
    /**
     * The default implementation of this method gets the class name from the
     * {@link #LOCKING_SERVICE_CLASS_KEY} parameter, constructs an instance of
     * that class using the default constructor and then initializes the
     * constructed {@link LockingService} instance using the {@link JsonObject}
     * found in the specified configuration via the
     * {@link #LOCKING_SERVICE_CONFIG_KEY} JSON property.
     *
     * @param jsonConfig The {@link JsonObject} describing the configuration for
     *                   this instance of scheduling service.
     *
     * @return The {@link LockingService} that was created and initialized.
     * @throws ServiceSetupException If a failure occurs in initializing the
     *                               backing {@link LockingService}.
     */
    @SuppressWarnings("unchecked")
    protected LockingService initLockingService(JsonObject jsonConfig) throws ServiceSetupException {
        try {
            // get the LockingService class name from the config
            String className = getConfigString(jsonConfig, LOCKING_SERVICE_CLASS_KEY,
                    this.getDefaultLockingServiceClassName());

            // get the LockingService Class object from the class name
            Class lockServiceClass = Class.forName(className);

            // verify the configured class actually implements LockingService
            if (!LockingService.class.isAssignableFrom(lockServiceClass)) {
                throw new ServiceSetupException(
                        "The configured locking service class for the " + LOCKING_SERVICE_CLASS_KEY
                                + " config parameter must " + "implement " + LockingService.class.getName());
            }

            // create an instance of the LockingService class
            LockingService lockService = (LockingService) lockServiceClass.getConstructor().newInstance();

            // get the locking service configuration (fall back on the default)
            JsonObject lockServiceConfig = (jsonConfig.containsKey(LOCKING_SERVICE_CONFIG_KEY))
                    ? getJsonObject(jsonConfig, LOCKING_SERVICE_CONFIG_KEY)
                    : this.getDefaultLockingServiceConfig();

            // initialize the locking service
            lockService.init(lockServiceConfig);

            // return the locking service
            return lockService;

        } catch (ServiceSetupException e) {
            throw e;
        } catch (Exception e) {
            throw new ServiceSetupException("Failed to initialize LockingService for SchedulingService", e);
        }
    }

    /**
     * Called by the {@link #init(JsonObject, TaskHandler)} implementation after
     * handling the base configuration parameters.
     *
     * @param config The {@link JsonObject} describing the configuration.
     *
     * @throws ServiceSetupException If a failure occurs during initialization.
     */
    protected abstract void doInit(JsonObject config) throws ServiceSetupException;

    /**
     * Transitions this instance to the {@link State#DESTROYING} state, waits for
     * any in-progress task handling to finish, calls {@link #doDestroy()},
     * destroys the backing {@link LockingService} and finally transitions to the
     * {@link State#DESTROYED} state (which notifies all waiting threads).  If
     * destruction is already in progress on another thread, this method simply
     * waits for it to complete.
     */
    public void destroy() {
        synchronized (this) {
            State state = this.getState();
            if (state == DESTROYED) {
                return;
            }

            if (state == DESTROYING) {
                // another thread is destroying -- wait until it completes
                while (this.getState() != DESTROYED) {
                    try {
                        this.wait(this.getStandardTimeout());
                    } catch (InterruptedException e) {
                        // ignore
                    }
                }
                // once DESTROYED state is found, just return
                return;
            }

            // begin destruction
            this.setState(DESTROYING);
            this.timerStart(destroy);

            // wait until no longer handling tasks
            while (this.isHandlingTasks()) {
                try {
                    this.wait(this.getStandardTimeout());

                } catch (InterruptedException ignore) {
                    // do nothing
                }
            }
        }

        // join against the task handling thread
        // NOTE(review): taskHandlingThread may be null if init() was never
        // called -- confirm destroy() is only invoked after init()
        try {
            this.taskHandlingThread.join();

        } catch (InterruptedException ignore) {
            // ignore the exception
        }

        try {
            // now complete the destruction / cleanup
            this.doDestroy();

            // destroy the locking service
            this.lockingService.destroy();

        } finally {
            this.setState(DESTROYED); // this should notify all as well
            this.timerPause(destroy);
        }
    }

    /**
     * This is called from the {@link #destroy()} implementation and should be
     * overridden by the concrete sub-class.
     */
    protected abstract void doDestroy();

    /**
     * Converts the specified {@link Stat} instances to an array of
     * {@link String} instances.
     *
     * @param statistics The {@link Stat} instances to convert.
     *
     * @return The array of {@link String} instances describing the specified
     *         {@link Stat} instances, or <code>null</code> if none specified.
     */
    private String[] convertTimerKeys(Stat... statistics) {
        String[] names = (statistics == null || statistics.length == 0) ? null : new String[statistics.length];
        if (names != null) {
            for (int index = 0; index < statistics.length; index++) {
                names[index] = statistics[index].toString();
            }
        }
        return names;
    }

    /**
     * Merges the specified {@link Timers} with this instance's {@link Timers} in
     * a thread-safe manner.
     *
     * @param timers The {@link Timers} to merge.
     */
    protected void timerMerge(Timers timers) {
        synchronized (this.getStatisticsMonitor()) {
            this.timers.mergeWith(timers);
        }
    }

    /**
     * Toggles the active and waiting timers according to the current workload.
     *
     * @param pendingCount   The number of pending tasks.
     * @param postponedCount The number of postponed tasks.
     * @param busy           <code>true</code> if the worker pool is busy,
     *                       otherwise <code>false</code>.
     */
    protected void toggleActiveAndWaitingTimers(int pendingCount, int postponedCount, boolean busy) {
        synchronized (this.getStatisticsMonitor()) {
            // check if the worker pool is actively handling tasks
            if (busy) {
                this.timerPause(waitingForTasks, waitingOnPostponed);
                this.timerStart(activelyHandling);

            } else if (pendingCount == 0 && postponedCount == 0) {
                // no tasks pending or postponed
                this.timerPause(activelyHandling, waitingOnPostponed);
                this.timerStart(waitingForTasks);

            } else if (pendingCount > 0) {
                // tasks pending
                this.timerPause(waitingForTasks, waitingOnPostponed);
                this.timerStart(activelyHandling);

            } else if (postponedCount > 0) {
                // none pending, but some postponed
                this.timerPause(activelyHandling, waitingForTasks);
                this.timerStart(waitingOnPostponed);
            }
        }
    }

    /**
     * Resumes the associated {@link Timers} in a thread-safe manner.
     *
     * @param statistic  The {@link Stat} to resume.
     * @param addlTimers The additional {@link Stat} instances to resume.
     */
    protected void timerResume(Stat statistic, Stat... addlTimers) {
        String[] names = this.convertTimerKeys(addlTimers);
        synchronized (this.getStatisticsMonitor()) {
            if (names == null) {
                this.timers.resume(statistic.toString());
            } else {
                this.timers.resume(statistic.toString(), names);
            }
        }
    }

    /**
     * Starts the associated {@link Timers} in a thread-safe manner.
     *
     * @param statistic  The {@link Stat} to start.
     * @param addlTimers The additional {@link Stat} instances to start.
     */
    protected void timerStart(Stat statistic, Stat... addlTimers) {
        String[] names = this.convertTimerKeys(addlTimers);
        synchronized (this.getStatisticsMonitor()) {
            if (names == null) {
                this.timers.start(statistic.toString());
            } else {
                this.timers.start(statistic.toString(), names);
            }
        }
    }

    /**
     * Pauses the associated {@link Timers} in a thread-safe manner.
     *
     * @param statistic  The {@link Stat} to pause.
     * @param addlTimers The additional {@link Stat} instances to pause.
     */
    protected void timerPause(Stat statistic, Stat... addlTimers) {
        String[] names = this.convertTimerKeys(addlTimers);
        synchronized (this.getStatisticsMonitor()) {
            if (names == null) {
                this.timers.pause(statistic.toString());
            } else {
                this.timers.pause(statistic.toString(), names);
            }
        }
    }

    /**
    /**
     * Gets the {@link Map} of {@link Stat} keys to their {@link Number} values
     * in an atomic thread-safe manner.
     *
     * @return The {@link Map} of {@link Stat} keys to their {@link Number}
     *         values.
     */
    @Override
    public Map<Stat, Number> getStatistics() {
        synchronized (this.getStatisticsMonitor()) {
            Number value = null;
            Map<String, Long> timings = this.timers.getTimings();

            Map<Stat, Number> statsMap = new LinkedHashMap<>();

            // configuration values
            statsMap.put(AbstractSchedulingService.Stat.concurrency, this.getConcurrency());
            statsMap.put(AbstractSchedulingService.Stat.standardTimeout, this.getStandardTimeout());
            statsMap.put(AbstractSchedulingService.Stat.postponedTimeout, this.getPostponedTimeout());
            statsMap.put(AbstractSchedulingService.Stat.followUpDelay, this.getFollowUpDelay());
            statsMap.put(AbstractSchedulingService.Stat.followUpTimeout, this.getFollowUpTimeout());

            // optional averages / maxima (omitted when no data yet)
            value = this.getAverageTaskTime();
            if (value != null) {
                statsMap.put(averageTaskTime, value);
            }

            value = this.getAverageTaskGroupTime();
            if (value != null) {
                statsMap.put(averageTaskGroupTime, value);
            }

            value = this.getLongestTaskTime();
            if (value != null) {
                statsMap.put(AbstractSchedulingService.Stat.longestTaskTime, value);
            }

            value = this.getLongestTaskGroupTime();
            if (value != null) {
                statsMap.put(AbstractSchedulingService.Stat.longestTaskGroupTime, value);
            }

            // counters
            statsMap.put(AbstractSchedulingService.Stat.taskCompleteCount, this.getCompletedTaskCount());
            statsMap.put(AbstractSchedulingService.Stat.taskSuccessCount, this.getSuccessfulTaskCount());
            statsMap.put(AbstractSchedulingService.Stat.taskFailureCount, this.getFailedTaskCount());
            statsMap.put(AbstractSchedulingService.Stat.taskAbortCount, this.getAbortedTaskCount());
            statsMap.put(AbstractSchedulingService.Stat.followUpCompleteCount, this.getCompletedFollowUpCount());
            statsMap.put(AbstractSchedulingService.Stat.followUpSuccessCount, this.getSuccessfulFollowUpCount());
            statsMap.put(AbstractSchedulingService.Stat.followUpFailureCount, this.getFailedFollowUpCount());

            value = this.getAverageHandleTaskTime();
            if (value != null) {
                statsMap.put(averageHandleTask, value);
            }

            statsMap.put(handleTaskCount, this.getHandleTaskCount());
            statsMap.put(handleTaskSuccessCount, this.getSuccessfulHandleTaskCount());
            statsMap.put(handleTaskFailureCount, this.getFailedHandleTaskCount());

            value = this.getFollowUpHandleTaskRatio();
            if (value != null) {
                statsMap.put(followUpHandleTaskRatio, value);
            }

            statsMap.put(taskGroupCompleteCount, this.getCompletedTaskGroupCount());
            statsMap.put(taskGroupSuccessCount, this.getSuccessfulTaskGroupCount());
            statsMap.put(taskGroupFailureCount, this.getFailedTaskGroupCount());

            value = this.getAverageCompressionRatio();
            if (value != null) {
                statsMap.put(averageCompression, value);
            }

            value = this.getGreatestCompressionRatio();
            if (value != null) {
                statsMap.put(greatestCompression, value);
            }

            value = this.getAverageFollowUpCompressionRatio();
            if (value != null) {
                statsMap.put(averageFollowUpCompression, value);
            }

            value = this.getGreatestFollowUpCompressionRatio();
            if (value != null) {
                statsMap.put(greatestFollowUpCompression, value);
            }

            value = this.getAverageTaskGroupSize();
            if (value != null) {
                statsMap.put(averageTaskGroupSize, value);
            }

            value = this.getGreatestTaskGroupSize();
            if (value != null) {
                statsMap.put(greatestTaskGroupSize, value);
            }

            value = this.getParallelism();
            if (value != null) {
                statsMap.put(parallelism, value);
            }

            value = this.getDequeueHitRatio();
            if (value != null) {
                statsMap.put(dequeueHitRatio, value);
            }

            statsMap.put(AbstractSchedulingService.Stat.greatestPostponedCount, this.getGreatestPostponedCount());

            // now get the timings
            for (Stat statistic : AbstractSchedulingService.Stat.values()) {
                value = timings.get(statistic.toString());
                if (value != null) {
                    statsMap.put(statistic, value);
                }
            }

            return statsMap;
        }
    }

    /**
    /**
     * Gets the average task compression from collapsing non-follow-up tasks
     * handled by the scheduling service.  This returns <code>null</code> if no
     * non-follow-up tasks have been handled.
     *
     * @return The average task compression from collapsing non-follow-up tasks
     *         handled by the scheduling service, or <code>null</code> if no
     *         non-follow-up tasks have been handled.
     */
    public Double getAverageCompressionRatio() {
        synchronized (this.getStatisticsMonitor()) {
            if (this.standardHandleCount == 0) {
                return null;
            }
            double completeCount = (double) this.taskCompleteCount;
            double handleCount = (double) this.standardHandleCount;
            return completeCount / handleCount;
        }
    }

    /**
     * Gets the greatest task compression (multiplicity) from collapsing
     * non-follow-up tasks handled by the scheduling service.  This returns
     * <code>null</code> if no tasks have been handled.
     *
     * @return The greatest task compression from collapsing non-follow-up tasks
     *         handled by the scheduling service, or <code>null</code> if no
     *         tasks have been handled.
     */
    public Integer getGreatestCompressionRatio() {
        synchronized (this.getStatisticsMonitor()) {
            if (this.greatestMultiplicity <= 0) {
                return null;
            }
            return this.greatestMultiplicity;
        }
    }

    /**
     * Gets the average task compression from collapsing follow-up tasks handled
     * by the scheduling service.  This returns <code>null</code> if no follow-up
     * tasks have been handled.
     *
     * @return The average task compression from collapsing follow-up tasks
     *         handled by the scheduling service, or <code>null</code> if no
     *         follow-up tasks have been handled.
     */
    public Double getAverageFollowUpCompressionRatio() {
        synchronized (this.getStatisticsMonitor()) {
            if (this.followUpHandleCount == 0) {
                return null;
            }
            double completeCount = (double) this.followUpCompleteCount;
            double handleCount = (double) this.followUpHandleCount;
            return completeCount / handleCount;
        }
    }

    /**
     * Gets the greatest task compression (multiplicity) from collapsing
     * follow-up tasks handled by the scheduling service.  This returns
     * <code>null</code> if no follow-up tasks have been handled.
     *
     * @return The greatest task compression from collapsing follow-up tasks
     *         handled by the scheduling service, or <code>null</code> if no
     *         follow-up tasks have been handled.
     */
    public Integer getGreatestFollowUpCompressionRatio() {
        synchronized (this.getStatisticsMonitor()) {
            if (this.greatestFollowUpMultiplicity <= 0) {
                return null;
            }
            return this.greatestFollowUpMultiplicity;
        }
    }

    /**
     * Gets the average number of tasks in all the completed task groups.  This
     * returns <code>null</code> if no task groups have been completed.
     *
     * @return The average number of tasks in all the completed task groups, or
     *         <code>null</code> if no task groups have been completed.
     */
    public Double getAverageTaskGroupSize() {
        synchronized (this.getStatisticsMonitor()) {
            if (this.taskGroupCount == 0) {
                return null;
            }
            // NOTE(review): numerator is the TOTAL completed task count (which
            // may include non-group tasks) -- confirm this is intended
            double completeCount = (double) this.taskCompleteCount;
            double groupCount = (double) this.taskGroupCount;
            return (completeCount / groupCount);
        }
    }

    /**
     * Gets the dequeue hit ratio.  This returns <code>null</code> if there have
     * been no attempts to dequeue a task.
     *
     * @return The dequeue hit ratio, or <code>null</code> if no attempts have
     *         been made to dequeue a task.
     */
    public Double getDequeueHitRatio() {
        synchronized (this.getStatisticsMonitor()) {
            if ((this.dequeueHitCount + this.dequeueMissCount) == 0) {
                return null;
            }
            double hits = (double) this.dequeueHitCount;
            double misses = (double) this.dequeueMissCount;
            double total = hits + misses;
            return (hits / total);
        }
    }

    /**
+ */ + protected void updateDequeueHitRatio(boolean hit) { + synchronized (this.getStatisticsMonitor()) { + if (hit) { + this.dequeueHitCount++; + } else { + this.dequeueMissCount++; + } + } + } + + /** + * The average time in milliseconds that non-follow-up tasks have taken from + * scheduling until completion. This returns null if no + * non-follow-up tasks have been handled. + * + * @return The average time in milliseconds that non-follow-up tasks have taken + * from scheduling until completion, or null if no + * non-follow-up tasks have been handled. + */ + public Double getAverageTaskTime() { + synchronized (this.getStatisticsMonitor()) { + if (this.taskCompleteCount == 0) { + return null; + } + double totalTime = (double) this.totalTaskTime; + double completeCount = (double) this.taskCompleteCount; + return totalTime / completeCount; + } + } + + /** + * The longest time in milliseconds that a non-follow-up task has taken from + * scheduling until completion. This returns null if no + * non-follow-up tasks have been handled. + * + * @return The longest time in milliseconds that a non-follow-up task has taken + * from scheduling until completion, or null if no + * non-follow-up tasks have been handled. + */ + public Long getLongestTaskTime() { + synchronized (this.getStatisticsMonitor()) { + if (this.longestTaskTime < 0) { + return null; + } + return this.longestTaskTime; + } + } + + /** + * Gets the average number of milliseconds from all task groups to be handled + * from the time first task in the group was scheduled until the last task was + * completed. This returns null if no task groups have been + * completed. + * + * @return The average number of milliseconds from all task groups to be handled + * from the time first task in the group was scheduled until the last + * task was completed, or null if no task groups have been + * completed. 
+ */ + public Double getAverageTaskGroupTime() { + synchronized (this.getStatisticsMonitor()) { + if (this.taskGroupCount == 0) { + return null; + } + double totalTime = (double) this.totalTaskGroupTime; + double groupCount = (double) this.taskGroupCount; + return totalTime / groupCount; + } + } + + /** + * Gets the greatest number of milliseconds for a task groups to be handled from + * the time first task in the group was scheduled until the last task was + * completed. This returns null if no task groups have been + * completed. + * + * @return The greatest number of milliseconds for a task groups to be handled + * from the time first task in the group was scheduled until the last + * task was completed, or null if no task groups have been + * completed. + */ + public Long getLongestTaskGroupTime() { + synchronized (this.getStatisticsMonitor()) { + if (this.longestTaskGroupTime < 0L) { + return null; + } + return this.longestTaskGroupTime; + } + } + + /** + * Gets the number of non-follow-up tasks that have been completed. + * + * @return The number of non-follow-up tasks that have been completed. + */ + public long getCompletedTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.taskCompleteCount; + } + } + + /** + * Gets the number of non-follow-up tasks that have been completed successfully. + * + * @return The number of non-follow-up tasks that have been completed + * successfully. + */ + public long getSuccessfulTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.taskSuccessCount; + } + } + + /** + * Gets the number of non-follow-up tasks that have been completed + * unsuccessfully (i.e.: with failures). + * + * @return The number of non-follow-up tasks that have been completed + * unsuccessfully (i.e.: with failures). 
+ */ + public long getFailedTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.taskFailureCount; + } + } + + /** + * Gets the number of non-follow-up tasks that were aborted. + * + * @return The number of non-follow-up tasks that were aborted. + */ + public long getAbortedTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.taskAbortCount; + } + } + + /** + * Gets the number of follow-up tasks that have been completed. + * + * @return The number of follow-up tasks that have been completed. + */ + public long getCompletedFollowUpCount() { + synchronized (this.getStatisticsMonitor()) { + return this.followUpCompleteCount; + } + } + + /** + * Gets the number of follow-up tasks that have been completed successfully. + * + * @return The number of follow-up tasks that have been completed successfully. + */ + public long getSuccessfulFollowUpCount() { + synchronized (this.getStatisticsMonitor()) { + return this.followUpSuccessCount; + } + } + + /** + * Gets the number of follow-up tasks that have been completed unsuccessfully + * (i.e.: with failures). + * + * @return The number of follow-up tasks that have been completed unsuccessfully + * (i.e.: with failures). + */ + public long getFailedFollowUpCount() { + synchronized (this.getStatisticsMonitor()) { + return this.followUpFailureCount; + } + } + + /** + * Get the average number of milliseconds spent calling + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} for tasks (both + * follow-up and non-follow-up). If no tasks have been handled then + * null is returned. + * + * @return The average number of milliseconds spent calling + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} for + * tasks, or null if no tasks have been handled. 
+ */ + public Double getAverageHandleTaskTime() { + synchronized (this.getStatisticsMonitor()) { + if (this.handleCount == 0) { + return null; + } + double totalTime = ((double) this.totalHandlingTime); + double callCount = ((double) this.handleCount); + return totalTime / callCount; + } + } + + /** + * Get the total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been called + * to handle tasks (both follow-up and non-follow-up). + * + * @return The total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been + * called to handle tasks (both follow-up and non-follow-up). + */ + public long getHandleTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.handleCount; + } + } + + /** + * Get the total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been called + * to handle tasks successfully (both follow-up and non-follow-up). + * + * @return The total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been + * called to handle tasks successfully (both follow-up and + * non-follow-up). + */ + public long getSuccessfulHandleTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.handleSuccessCount; + } + } + + /** + * Get the total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been called + * to handle tasks unsuccessfully (both follow-up and non-follow-up). + * + * @return The total number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been + * called to handle tasks unsuccessfully (both follow-up and + * non-follow-up). 
+ */ + public long getFailedHandleTaskCount() { + synchronized (this.getStatisticsMonitor()) { + return this.handleFailureCount; + } + } + + /** + * Gets the ratio of the number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been called + * to handle follow-up tasks to the number of times it has been called to handle + * all tasks that have been handled. This returns null if no + * tasks have been handled. + * + * @return The ratio of the number of times + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} has been + * called to handle follow-up tasks to the number of times it has been + * called to handle all tasks that have been handled, or + * null if no tasks have been handled. + */ + public Double getFollowUpHandleTaskRatio() { + synchronized (this.getStatisticsMonitor()) { + if (this.handleCount == 0) { + return null; + } + double followUp = ((double) this.followUpHandleCount); + double all = ((double) this.handleCount); + return followUp / all; + } + } + + /** + * Gets the number of {@link TaskGroup} instances that have been fully handled + * (whether successful or not). + * + * @return The number of {@link TaskGroup} instances that have been fully + * handled (whether successful or not). + */ + public long getCompletedTaskGroupCount() { + synchronized (this.getStatisticsMonitor()) { + return this.taskGroupCount; + } + } + + /** + * Gets the number of task groups that have been successfully completed. + * + * @return The number of task groups that have been successfully completed. + */ + public long getSuccessfulTaskGroupCount() { + synchronized (this.getStatisticsMonitor()) { + return this.groupSuccessCount; + } + } + + /** + * Gets the number of task groups that have been completed with failures. + * + * @return The number of task groups that have been completed with failures. 
+ */ + public long getFailedTaskGroupCount() { + synchronized (this.getStatisticsMonitor()) { + return this.groupFailureCount; + } + } + + /** + * The greatest number of tasks in the completed task groups. This returns + * null if no task groups have been completed. + * + * @return The greatest number of tasks in the completed task groups, or + * null if no task groups have been completed. + */ + public Integer getGreatestTaskGroupSize() { + synchronized (this.getStatisticsMonitor()) { + if (this.greatestGroupSize <= 0) { + return null; + } + return this.greatestGroupSize; + } + } + + /** + * Gets the ratio of the total handling time across all threads to the total + * active handling of the task scheduler to indicate the level of parallelism + * achieved. This returns null if no tasks have yet been handled. + * + * @return The ratio of the total handling time across all threads to the total + * active handling time of the task scheduler, or null if + * no tasks have been handled. + */ + public Double getParallelism() { + synchronized (this.getStatisticsMonitor()) { + String timerKey = activelyHandling.toString(); + Long activeTime = this.timers.getElapsedTime(timerKey); + if (activeTime == 0L) { + return null; + } + Double totalTime = (double) this.totalHandlingTime; + return (totalTime / ((double) activeTime)); + } + } + + /** + * Gets the greatest number of tasks that have been postponed. + * + * @return The greatest number of tasks that have been postponed. + */ + public int getGreatestPostponedCount() { + return this.greatestPostponedCount; + } + + /** + * Encapsulates a scheduled {@link Task} and all duplicates of that {@link Task} + * assuming the tasks can be collapsed. + * + */ + protected static class ScheduledTask { + /** + * The original backing task ID. + */ + private long origTaskId; + + /** + * Flag indicating if this contains follow-up tasks or non-follow-up tasks. 
+ */ + private boolean followUp; + + /** + * The external follow-up ID to reference the task in persistent storage. + */ + private String followUpId; + + /** + * The follow-up multiplicity since the follow-up tasks lack backing tasks. + */ + private Integer multiplicity = null; + + /** + * The nanosecond when this scheduled task is considered to be expired. + */ + private Long expirationNanos = null; + + /** + * The action associated with the associated tasks. + */ + private String action; + + /** + * The parameters for the associated tasks. + */ + private SortedMap parameters; + + /** + * The resource keys for the associated tasks. + */ + private SortedSet resourceKeys; + + /** + * The {@link List} of duplicate {@link Task} instances. + */ + private List backingTasks; + + /** + * The signature for the tasks. + */ + private String signature; + + /** + * Flag indicating if this instance allows collapsing duplicate tasks. + */ + private boolean allowCollapse = false; + + /** + * Flag indicating if the task has succeeded. + */ + private Boolean successful = null; + + /** + * The {@link LockToken} for the resources that are locked for this task. + */ + private LockToken lockToken = null; + + /** + * Constructs with the first backing actual {@link Task}. + * + * @param task The actual {@link Task} that will back this instance. + */ + public ScheduledTask(Task task) { + this.origTaskId = task.getTaskId(); + this.followUp = task.getTaskGroup() == null; + this.backingTasks = new LinkedList<>(); + this.action = task.getAction(); + this.parameters = task.getParameters(); + this.resourceKeys = task.getResourceKeys(); + this.signature = task.getSignature(); + this.allowCollapse = task.isAllowingCollapse(); + this.lockToken = null; + this.successful = null; + this.expirationNanos = null; + task.markScheduled(); + this.backingTasks.add(task); + } + + /** + * Gets the task ID of the original backing task with which this instance was + * constructed. 
+ * + * @return The task ID of the original backing task with which this instance was + * constructed. + */ + public long getOriginalBackingTaskId() { + return this.origTaskId; + } + + /** + * Overridden to return a diagnostic {@link String} describing this instance. + * + * @return A diagnostic {@link String} describing this instance. + */ + @Override + public String toString() { + JsonObjectBuilder job = Json.createObjectBuilder(); + JsonUtilities.add(job, "originalBackingTaskId", this.getOriginalBackingTaskId()); + JsonUtilities.add(job, "signature", this.getSignature()); + JsonUtilities.add(job, "allowCollapse", this.isAllowingCollapse()); + JsonUtilities.add(job, "multiplicity", this.getMultiplicity()); + JsonUtilities.add(job, "followUp", this.isFollowUp()); + if (this.expirationNanos != null) { + JsonUtilities.add(job, "followUpId", this.getFollowUpId()); + long deltaNanos = this.expirationNanos - System.nanoTime(); + long deltaMillis = deltaNanos / ONE_MILLION; + long expireMillis = System.currentTimeMillis() + deltaMillis; + Date expireDate = new Date(expireMillis); + JsonUtilities.add(job, "expiration", expireDate.toString()); + } + JsonUtilities.add(job, "successful", this.isSuccessful()); + JsonUtilities.add(job, "action", this.getAction()); + job.add("parameters", JsonUtilities.toJsonObjectBuilder(this.getParameters())); + JsonArrayBuilder jab = Json.createArrayBuilder(); + for (ResourceKey resourceKey : this.getResourceKeys()) { + jab.add(resourceKey.toString()); + } + job.add("resources", jab); + return "\n" + JsonUtilities.toJsonText(job.build()); + } + + /** + * Constructor for deserializing a follow-up task from persistent storage. + * + * @param jsonText The serialized JSON representation of the + * follow-up task. + * @param followUpId The optional external persistence ID for + * the follow-up task so it can be later + * marked complete in and deleted from + * persistent storage. 
+ * @param multiplicity The collapsed multiplicity from persistent + * storage, which may be one (1) if the + * follow-up task did not allow collapsing + * with duplicate tasks. + * @param expirationTime The millisecond UTC time since then epoch + * when the follow-up task is considered to be + * "expired". + * @param elapsedMillisSinceCreation The number of milliseconds that have + * elapsed since the deserialized task was + * originally created, or null if + * unknown. + */ + public ScheduledTask(String jsonText, String followUpId, int multiplicity, long expirationTime, long elapsedMillisSinceCreation) { + this(Task.deserialize(jsonText, false, elapsedMillisSinceCreation)); + + this.followUp = true; + this.followUpId = followUpId; + this.allowCollapse = false; + this.multiplicity = multiplicity; + + // determine the expiration in a consistent manner + long now = System.currentTimeMillis(); + long remainingNanos = (expirationTime - now) * ONE_MILLION; + this.expirationNanos = System.nanoTime() + remainingNanos; + } + + /** + * Checks if the actual tasks backing this instance are follow-up tasks. Either + * all the tasks are follow-up tasks or all are not follow-up tasks. + * + * @return true if the tasks are follow-up tasks, otherwise + * false. + */ + public boolean isFollowUp() { + return this.followUp; + } + + /** + * This method always returns false if not a follow-up task. If + * this is a follow-up task then this returns true if the follow-up + * task is expired, otherwise false. + * + * @return true if this is an expired follow-up task, otherwise + * false. + */ + public boolean isFollowUpExpired() { + if (this.expirationNanos == null) { + return false; + } + return System.nanoTime() > this.expirationNanos; + } + + /** + * Updates the expiration time to the specified number of milliseconds since the + * epoch in UTC time coordinates. + * + * @param expiration The expiration time in number of milliseconds since the + * epoch in UTC time coordinates. 
+ */ + public void setFollowUpExpiration(long expiration) { + // determine the expiration in a consistent manner + long now = System.currentTimeMillis(); + long remainingNanos = (expiration - now) * ONE_MILLION; + this.expirationNanos = System.nanoTime() + remainingNanos; + } + + /** + * Obtains the external ID used to identify the deserialized follow-up task in + * persistent storage. This should always return null if + * {@link #isFollowUp()} is false. This may return + * null if {@link #isFollowUp()} is true if the + * external persistent storage mechanism does not require an external ID. + * + * @return The external ID used to identify the deserialized follow-up task in + * persistent storage. + */ + public String getFollowUpId() { + return this.followUpId; + } + + /** + * Removes all backing tasks that have been flagged as aborted and returns the + * remaining number of backing tasks. If no backing tasks remain then this + * {@link ScheduledTask} should itself be aborted. + * + * @return The number of backing tasks that were removed because they were + * aborted. + */ + public synchronized int removeAborted() { + if (this.isFollowUp()) { + return 0; + } + + int removedCount = 0; + Iterator iter = this.backingTasks.iterator(); + while (iter.hasNext()) { + // get the next task + Task task = iter.next(); + + // get the task group, not a follow-up so we should always have one + TaskGroup group = task.getTaskGroup(); + + // check if the group is fast-fail, if not then no abort + if (!group.isFastFail()) { + continue; + } + + // check if the group has failed + if (group.getState() == TaskGroup.State.FAILED) { + // mark the task as aborted + task.aborted(); + + // we have a fast-fail group that is marked as failed + iter.remove(); // remove the aborted task + removedCount++; + } + } + + // return the number of removed tasks + return removedCount; + } + + /** + * Gets the action for the backing tasks for this instance. 
+ * + * @return The action for the backing tasks for this instance. + */ + public String getAction() { + return this.action; + } + + /** + * Gets the unmodifiable {@link Map} describing the parameters for the + * backing tasks for this instance. + * + * @return The unmodifiable {@link Map} describing the parameters for the + * backing tasks for this instance. + */ + public SortedMap getParameters() { + return this.parameters; + } + + /** + * Gets the unmodifiable {@link Set} containing the {@link ResourceKey} + * instances identifying the resources for the backing tasks for this instance. + * + * @return The unmodifiable {@link Set} containing the + * {@link ResourceKey} instances identifying the resources for the + * backing tasks for this instance. + */ + public SortedSet getResourceKeys() { + return this.resourceKeys; + } + + /** + * Gets the {@link List} of backing tasks associated with the scheduled task. + * + * @return The {@link List} of {@link Task} instances describing the backing + * tasks for this instance. + */ + public List getBackingTasks() { + if (this.backingTasks == null) { + return null; + } else { + return Collections.unmodifiableList(this.backingTasks); + } + } + + /** + * Merges the specified {@link Task} with the other backing tasks of this + * instance. + * + * @param task The {@link Task} to merge. 
+ */ + public void collapseWith(Task task) { + // check if one of the tasks does not allow collapse + if (!this.isAllowingCollapse() || !task.isAllowingCollapse()) { + throw new UnsupportedOperationException("Cannot collapse specified task (" + task + ") with this task (" + + this.backingTasks.get(0) + ") because at least one does not " + "allow collapse."); + } + + // check if the task signatures do not match + if (!this.getSignature().equals(task.getSignature())) { + throw new IllegalArgumentException("Cannot collapse the specified task (" + task + ") with this task (" + + this.backingTasks.get(0) + ") because they are not duplicates."); + } + + // add the backing tasks + task.markScheduled(); + this.backingTasks.add(task); + } + + /** + * Gets the signature for the backing {@link Task} for this instance. + * + * @return The signature for the backing {@link Task} for this instance. + */ + public String getSignature() { + return this.signature; + } + + /** + * Checks whether the backing tasks allow collapsing duplicate tasks. + * + * @return true if the duplicate tasks can be collapsed with the + * backing task from this instance, and false if collapse + * is not allowed. + */ + public boolean isAllowingCollapse() { + return this.allowCollapse; + } + + /** + * Gets the number of duplicate tasks identical to this one that were scheduled + * prior to the task being handled. + * + * @return The number of duplicate tasks identical to this one that were scheduled prior to handling. + */ + public int getMultiplicity() { + if (this.multiplicity != null) { + return this.multiplicity; + } else { + return this.backingTasks.size(); + } + } + + /** + * Marks all the backing tasks to transition to the {@link Task.State#STARTED} + * state via {@link Task#beginHandling()}. + */ + public void beginHandling() { + this.backingTasks.forEach((task) -> { + task.beginHandling(); + }); + } + + /** + * Marks this instance and the backing tasks as having succeeded. 
+ */ + public void succeeded() { + this.successful = Boolean.TRUE; + this.backingTasks.forEach((task) -> { + task.succeeded(); + }); + } + + /** + * Marks this instance and the backing tasks as having failed. + * + * @param failure The exception that occurred. + */ + public void failed(Exception failure) { + this.successful = Boolean.FALSE; + this.backingTasks.forEach((task) -> { + task.failed(failure); + }); + } + + /** + * Checks if this {@link ScheduledTask} has been flagged as successful. This + * returns null if the {@link ScheduledTask} has not yet been + * handled, otherwise it returns {@link Boolean#TRUE} or {@link Boolean#FALSE}. + * + * @return {@link Boolean#TRUE} if successful, {@link Boolean#FALSE} if + * unsuccessful, and null if not yet completed. + */ + public Boolean isSuccessful() { + return this.successful; + } + + /** + * Acquires the locks on the resources required for this instance. If no locks + * are required this simply returns true. + * + * @param lockingService The {@link LockingService} to use. + * + * @return true if all required locks were obtained, otherwise + * false. + */ + public synchronized boolean acquireLocks(LockingService lockingService) { + if (this.lockToken != null) { + return true; + } + + Set resourceKeys = this.getResourceKeys(); + if (resourceKeys == null || resourceKeys.size() == 0) { + return true; + } + + try { + this.lockToken = lockingService.acquireLocks(resourceKeys, 0L); + + } catch (ServiceExecutionException e) { + throw new RuntimeException(e); + } + + // check if the lock token is non-null + return (this.lockToken != null); + } + + /** + * Releases any locks associated with the backing tasks. + * + * @param lockingService The {@link LockingService} with which to release the + * locks. 
+ */ + public synchronized void releaseLocks(LockingService lockingService) { + if (this.lockToken == null) { + return; + } + + Set resourceKeys = this.getResourceKeys(); + if (resourceKeys == null || resourceKeys.size() == 0) { + return; + } + + try { + int count = lockingService.releaseLocks(this.lockToken); + + this.lockToken = null; + + if (this.resourceKeys.size() != count) { + throw new IllegalStateException("Wrong number of locks released. released=[ " + count + + " ], expected=[ " + this.getResourceKeys().size() + " ]"); + } + + } catch (ServiceExecutionException e) { + throw new RuntimeException(e); + } + } + } + + /** + * The encapsulation of the result from the async workers. + */ + protected static class TaskResult { + /** + * The {@link ScheduledTask} that was handled. + */ + private ScheduledTask task; + + /** + * The {@link Timers} associated with the handling of the associated task. + */ + private Timers timers; + + /** + * Constructs with the specified parameters. + * + * @param task The {@link Task} that was handled. + * @param timers The {@link Timers} for handling the task. + */ + public TaskResult(ScheduledTask task, Timers timers) { + this.task = task; + this.timers = timers; + } + + /** + * Gets the associated {@link ScheduledTask}. + * + * @return The associated {@link ScheduledTask}. + */ + public ScheduledTask getTask() { + return this.task; + } + + /** + * Gets the associated {@link Timers}. + * + * @return The associated {@link Timers}. 
+ */ + public Timers getTimers() { + return this.timers; + } } - } } diff --git a/src/main/java/com/senzing/listener/service/scheduling/Task.java b/src/main/java/com/senzing/listener/service/scheduling/Task.java index b02bb4f..71568d6 100644 --- a/src/main/java/com/senzing/listener/service/scheduling/Task.java +++ b/src/main/java/com/senzing/listener/service/scheduling/Task.java @@ -29,14 +29,14 @@ public class Task { */ enum State { /** - * The transient state that the task is in between when it is constructed - * and when it is scheduled to be executed. + * The transient state that the task is in between when it is constructed and + * when it is scheduled to be executed. */ UNSCHEDULED, /** - * The task is scheduled to be performed at some future time but has not - * yet been begun working. + * The task is scheduled to be performed at some future time but has not yet + * been begun working. */ SCHEDULED, @@ -57,8 +57,8 @@ enum State { FAILED, /** - * The task was aborted before being started because another task in the - * group failed. + * The task was aborted before being started because another task in the group + * failed. */ ABORTED; @@ -97,8 +97,8 @@ enum State { * Gets the unmodifiable {@link Set} of predecessor states for this * instance. * - * @return The unmodifiable {@link Set} of predecessor states for - * this instance. + * @return The unmodifiable {@link Set} of predecessor states for this + * instance. */ public Set getPredecessors() { return this.predecessors; @@ -108,8 +108,8 @@ public Set getPredecessors() { * Gets the unmodifiable {@link Set} of successor states for this * instance. * - * @return The unmodifiable {@link Set} of successor states for - * this instance. + * @return The unmodifiable {@link Set} of successor states for this + * instance. 
*/ public Set getSuccessors() { return this.successors; @@ -123,44 +123,43 @@ public Set getSuccessors() { */ public enum Statistic { /** - * The time spent (in milliseconds) between constructing the task and - * scheduling the task. If the task is not yet scheduled then it is the - * time spent thus far. + * The time spent (in milliseconds) between constructing the task and scheduling + * the task. If the task is not yet scheduled then it is the time spent thus + * far. */ unscheduledTime, /** - * The time spent (in milliseconds) between scheduling the task and - * beginning to handle the task. If the task has not yet begun handling - * then it is the time spent thus far waiting to be handled. + * The time spent (in milliseconds) between scheduling the task and beginning to + * handle the task. If the task has not yet begun handling then it is the time + * spent thus far waiting to be handled. */ pendingTime, /** - * The time spent (in milliseconds) handling the task once it was no longer - * in a pending state. This is zero (0) if not yet handled and if the task - * is not yet completed it will be the time spent thus far. + * The time spent (in milliseconds) handling the task once it was no longer in a + * pending state. This is zero (0) if not yet handled and if the task is not yet + * completed it will be the time spent thus far. */ handlingTime, /** * The number of milliseconds from the point in time from when the task was - * scheduled until the time it completed either successfully, with a - * failure or was aborted. + * scheduled until the time it completed either successfully, with a failure or + * was aborted. */ roundTripTime, /** - * The total time (in milliseconds) from construction until completion - * (whether successful or failed) or until the current time if not yet - * completed. + * The total time (in milliseconds) from construction until completion (whether + * successful or failed) or until the current time if not yet completed. 
*/ lifespan; /** - * Gets the unit of measure for this statistic. This is the unit that - * the {@link Number} value is measured in when calling {@link - * Task#getStatistics()}} + * Gets the unit of measure for this statistic. This is the unit that the + * {@link Number} value is measured in when calling + * {@link Task#getStatistics()}} * * @return The unit of measure for this statistic. */ @@ -214,15 +213,15 @@ private static synchronized long getNextTaskId() { private TaskGroup taskGroup = null; /** - * Flag indicating if this task instance allows tasks that are duplicates - * of this one to be collapsed into a single task handling with an - * incrementally increased multiplicity. + * Flag indicating if this task instance allows tasks that are duplicates of + * this one to be collapsed into a single task handling with an incrementally + * increased multiplicity. */ private boolean allowCollapse = true; /** - * The associated {@link Exception} if the handling of this task encountered - * a failure. + * The associated {@link Exception} if the handling of this task encountered a + * failure. */ private Exception failure = null; @@ -238,20 +237,20 @@ private static synchronized long getNextTaskId() { private long createdTimeNanos = -1L; /** - * The nanoseconds timestamp when this task was scheduled, or negative - * one (-1) if not yet scheduled. + * The nanoseconds timestamp when this task was scheduled, or negative one (-1) + * if not yet scheduled. */ private long scheduledTimeNanos = -1L; /** - * The nanosecond timestamp when this task was started, or negative - * one (-1) if not yet started. + * The nanosecond timestamp when this task was started, or negative one (-1) if + * not yet started. */ private long startedTimeNanos = -1L; /** - * The nanosecond timestamp when this task was completed, or negative - * one (-1) if not yet completed. + * The nanosecond timestamp when this task was completed, or negative one (-1) + * if not yet completed. 
*/ private long completedTimeNanos = -1L; @@ -266,14 +265,10 @@ private static synchronized long getNextTaskId() { * null if the task is a follow-up task either * being constructed directly or deserialized from the * database. - * @param allowCollapse true if collapsing identical tasks of - * this type is allowed, otherwise false. - */ - Task(String action, - SortedMap parameters, - SortedSet resourceKeys, - TaskGroup taskGroup, - boolean allowCollapse) { + * @param allowCollapse true if collapsing identical tasks of this + * type is allowed, otherwise false. + */ + Task(String action, SortedMap parameters, SortedSet resourceKeys, TaskGroup taskGroup, boolean allowCollapse) { this.taskId = getNextTaskId(); this.action = action; this.parameters = new TreeMap<>(parameters); @@ -293,22 +288,17 @@ private static synchronized long getNextTaskId() { * @param parameters The {@link SortedMap} of parameters * for the task. * @param resourceKeys The {@link SortedSet} of - * {@link ResourceKey} instances - * for the task. + * {@link ResourceKey} instances for the + * task. * @param allowCollapse true if collapsing - * identical tasks of - * this type is allowed, otherwise - * false. + * identical tasks of this type is + * allowed, otherwise false. * @param elapsedMillisSinceSerialization The number of milliseconds that have * elapsed since the deserialized task * was originally created, or * null if unknown. */ - private Task(String action, - SortedMap parameters, - SortedSet resourceKeys, - boolean allowCollapse, - Long elapsedMillisSinceSerialization) { + private Task(String action, SortedMap parameters, SortedSet resourceKeys, boolean allowCollapse, Long elapsedMillisSinceSerialization) { this.taskId = getNextTaskId(); this.action = action; this.parameters = parameters; @@ -327,10 +317,10 @@ private Task(String action, } /** - * Returns the statistics for this {@link Task} instance. 
The statistics - * are returned as a {@link Map} of {@link Statistic} keys to {@link Number} - * values whose units are measured in the associated units for the given the - * key found via {@link Statistic#getUnits()}. + * Returns the statistics for this {@link Task} instance. The statistics are + * returned as a {@link Map} of {@link Statistic} keys to {@link Number} values + * whose units are measured in the associated units for the given the key found + * via {@link Statistic#getUnits()}. * * @return The statistics for this {@link Task}. */ @@ -367,9 +357,7 @@ public Map getStatistics() { * @return The deserialized {@link Task}. */ @SuppressWarnings("unchecked") - static Task deserialize(String jsonText, - boolean allowCollapse, - Long elapsedMillisSinceSerialization) { + static Task deserialize(String jsonText, boolean allowCollapse, Long elapsedMillisSinceSerialization) { JsonObject jsonObject = parseJsonObject(jsonText); String action = getString(jsonObject, "action"); @@ -379,13 +367,11 @@ static Task deserialize(String jsonText, JsonArray resourceArray = getJsonArray(jsonObject, "resources"); // get the parameters map - SortedMap paramsMap = (paramsObject == null) - ? Collections.emptySortedMap() + SortedMap paramsMap = (paramsObject == null) ? Collections.emptySortedMap() : new TreeMap<>((Map) normalizeJsonValue(paramsObject)); // get the list of resources - List resourceList = (resourceArray == null) - ? Collections.emptyList() + List resourceList = (resourceArray == null) ? 
Collections.emptyList() : (List) normalizeJsonValue(resourceArray); // convert the resources from strings to objects @@ -394,11 +380,7 @@ static Task deserialize(String jsonText, resourceSet.add(ResourceKey.parse(resourceKey)); } - return new Task(action, - paramsMap, - resourceSet, - allowCollapse, - elapsedMillisSinceSerialization); + return new Task(action, paramsMap, resourceSet, allowCollapse, elapsedMillisSinceSerialization); } /** @@ -411,8 +393,8 @@ public synchronized State getState() { } /** - * Sets the {@link State} of this task to the specified {@link State} which - * must be a valid transition from the {@linkplain #getState() current state}. + * Sets the {@link State} of this task to the specified {@link State} which must + * be a valid transition from the {@linkplain #getState() current state}. * * @param state The {@link State} to transition to. * @@ -424,32 +406,30 @@ private synchronized void setState(State state) { Set predecessors = state.getPredecessors(); Set successors = this.getState().getSuccessors(); - if ((!predecessors.contains(this.getState())) - || (!successors.contains(state))) { + if ((!predecessors.contains(this.getState())) || (!successors.contains(state))) { throw new IllegalArgumentException( - "Cannot transition to specified state (" + state + ") from " - + "current state (" + this.getState() + ")."); + "Cannot transition to specified state (" + state + ") from " + "current state (" + this.getState() + ")."); } this.state = state; this.notifyAll(); } /** - * Gets the associated {@link TaskGroup}, if any. This returns - * null if the task has no group. + * Gets the associated {@link TaskGroup}, if any. This returns null + * if the task has no group. * - * @return The associated {@link TaskGroup}, or null if this - * task has no group. + * @return The associated {@link TaskGroup}, or null if this task + * has no group. 
*/ public TaskGroup getTaskGroup() { return this.taskGroup; } /** - * Checks if this task can be collapsed with other collapsible tasks that - * are identical to it for a single call to {@link - * TaskHandler#handleTask(String, Map, int, Scheduler)} with an incrementally - * increased multiplicity. + * Checks if this task can be collapsed with other collapsible tasks that are + * identical to it for a single call to + * {@link TaskHandler#handleTask(String, Map, int, Scheduler)} with an + * incrementally increased multiplicity. * * @return true if this task can be collapsed, otherwise * false. @@ -459,8 +439,8 @@ public boolean isAllowingCollapse() { } /** - * Checks if this {@link Task} instance has been marked in a state of - * completion either {@link State#SUCCESSFUL}, {@link State#FAILED} or + * Checks if this {@link Task} instance has been marked in a state of completion + * either {@link State#SUCCESSFUL}, {@link State#FAILED} or * {@link State#ABORTED}. * * @return true if completed, otherwise false. @@ -579,27 +559,26 @@ public SortedSet getResourceKeys() { /** * Gets the {@link Exception} describing any failure that may have occurred - * while handling this {@link Task}. This method returns null - * if the {@link Task} has not yet been handled or was handled but completed + * while handling this {@link Task}. This method returns null if + * the {@link Task} has not yet been handled or was handled but completed * successfully. * - * @return The {@link Exception} describing any failure that may have - * occurred while handling this {@link Task}, or null - * if the {@link Task} has not yet been handled or was handled but - * completed successfully. + * @return The {@link Exception} describing any failure that may have occurred + * while handling this {@link Task}, or null if the + * {@link Task} has not yet been handled or was handled but completed + * successfully. 
*/ public Exception getFailure() { return this.failure; } /** - * Converts the specified {@link Task} to a {@link JsonObjectBuilder} - * describing the action, parameters and associated resource keys. + * Converts the specified {@link Task} to a {@link JsonObjectBuilder} describing + * the action, parameters and associated resource keys. * * @param task The {@link Task} to be represented as a {@link JsonObject}. * - * @return The {@link JsonObjectBuilder} describing the specified - * {@link Task}. + * @return The {@link JsonObjectBuilder} describing the specified {@link Task}. */ public static JsonObjectBuilder toJsonObjectBuilder(Task task) { JsonObjectBuilder job1 = Json.createObjectBuilder(); @@ -622,11 +601,10 @@ public static JsonObjectBuilder toJsonObjectBuilder(Task task) { } /** - * Converts this {@link Task} instance to a {@link JsonObjectBuilder} - * describing the action, parameters and associated resource keys. + * Converts this {@link Task} instance to a {@link JsonObjectBuilder} describing + * the action, parameters and associated resource keys. * - * @return The {@link JsonObjectBuilder} describing this {@link Task} - * instance. + * @return The {@link JsonObjectBuilder} describing this {@link Task} instance. */ public JsonObjectBuilder toJsonObjectBuilder() { return toJsonObjectBuilder(this); @@ -655,8 +633,8 @@ public JsonObject toJsonObject() { } /** - * Converts the specified {@link Task} to JSON text describing the - * action, parameters and associated resource keys. + * Converts the specified {@link Task} to JSON text describing the action, + * parameters and associated resource keys. * * @param task The {@link Task} to be represented as a {@link JsonObject}. * @@ -676,32 +654,32 @@ public String toJsonText() { } /** - * Gets a message digest signature which can be used to easily identify - * a serialized text representation of this task. 
+ * Gets a message digest signature which can be used to easily identify a + * serialized text representation of this task. * * @param task The {@link Task} to convert to a signature. * - * @return A message digest signature which can be used to easily identify - * this a serialized text representation of this task. + * @return A message digest signature which can be used to easily identify this + * a serialized text representation of this task. */ public static String toSignature(Task task) { return toSignature(toJsonText(task)); } /** - * Gets a message digest signature which can be used to easily identify - * a serialized text representation of this task. + * Gets a message digest signature which can be used to easily identify a + * serialized text representation of this task. * - * @return A message digest signature which can be used to easily identify - * this a serialized text representation of this task. + * @return A message digest signature which can be used to easily identify this + * a serialized text representation of this task. */ public String getSignature() { return toSignature(this); } /** - * Converts the specified JSON text to a message digest signature which can - * be used to easily identify the specified serialized JSON text. + * Converts the specified JSON text to a message digest signature which can be + * used to easily identify the specified serialized JSON text. * * @param jsonText The JSON text representation of the task. 
* @@ -709,13 +687,12 @@ public String getSignature() { */ private static String toSignature(String jsonText) { try { + HexFormat hex = HexFormat.of(); MessageDigest md = MessageDigest.getInstance("SHA-256"); md.update(jsonText.getBytes(UTF_8)); byte[] digest = md.digest(); StringBuilder sb = new StringBuilder(); - for (byte b : digest) { - sb.append(Integer.toHexString(0xFF & b)); - } + sb.append(hex.formatHex(digest)); return sb.toString(); } catch (NoSuchAlgorithmException cannotHappen) { @@ -736,8 +713,8 @@ public String toString() { } /** - * Provided as a way to convert to a {@link String} without - * synchronization to avoid possible dead locks. + * Provided as a way to convert to a {@link String} without synchronization to + * avoid possible dead locks. * * @param state The {@link Task.State} known to the caller. * @@ -752,16 +729,15 @@ protected String toString(State state) { String signature = toSignature(jsonText); sb.append("signature=[ ").append(signature).append(" ], "); - sb.append("allowCollapse=[ ").append(this.isAllowingCollapse()) - .append(" ], ").append("task=[ ").append(jsonText).append(" ]"); + sb.append("allowCollapse=[ ").append(this.isAllowingCollapse()).append(" ], ").append("task=[ ").append(jsonText) + .append(" ]"); return sb.toString(); } /** - * Gets the number of milliseconds from the point in time at which this task - * was created until it was scheduled to be handled. If the task has not - * yet been scheduled then the number of milliseconds since it was created - * is returned. + * Gets the number of milliseconds from the point in time at which this task was + * created until it was scheduled to be handled. If the task has not yet been + * scheduled then the number of milliseconds since it was created is returned. * * @return The duration of the unscheduled time of this task in milliseconds. 
*/ @@ -774,13 +750,12 @@ public synchronized long getUnscheduledTime() { } /** - * Gets the number of milliseconds from the point in time at which this task - * was scheduled until handling of the task was started. If the task has not - * yet been scheduled then negative one (-1) is returned. If the task has - * been scheduled, but has not yet been started then the number of - * milliseconds since it was scheduled is returned. If the task was a - * deserialized follow-up task then this may include the time spent in - * persistent storage. + * Gets the number of milliseconds from the point in time at which this task was + * scheduled until handling of the task was started. If the task has not yet + * been scheduled then negative one (-1) is returned. If the task has been + * scheduled, but has not yet been started then the number of milliseconds since + * it was scheduled is returned. If the task was a deserialized follow-up task + * then this may include the time spent in persistent storage. * * @return The duration of the pending time of this task in milliseconds. */ @@ -798,11 +773,11 @@ public synchronized long getPendingTime() { } /** - * Gets the number of milliseconds from the point in time at which handling - * of this task was started until handling completed successfully or with - * failures. If the task has not yet been started then negative one (-1) - * is returned. If the task started, but has not yet completed then the - * number of milliseconds since it was started is returned. + * Gets the number of milliseconds from the point in time at which handling of + * this task was started until handling completed successfully or with failures. + * If the task has not yet been started then negative one (-1) is returned. If + * the task started, but has not yet completed then the number of milliseconds + * since it was started is returned. * * @return The duration of the handling time of this task in milliseconds. 
*/ @@ -818,15 +793,14 @@ public synchronized long getHandlingTime() { } /** - * Gets the number of milliseconds from the point in time from when this - * task was scheduled until the time it was completed either successfully or - * with failures (or aborted). If not yet completed then the number of - * milliseconds since the task was created is returned. If the task was a - * deserialized follow-up task then this may include the time spent in - * persistent storage. + * Gets the number of milliseconds from the point in time from when this task + * was scheduled until the time it was completed either successfully or with + * failures (or aborted). If not yet completed then the number of milliseconds + * since the task was created is returned. If the task was a deserialized + * follow-up task then this may include the time spent in persistent storage. * - * @return The duration of the time from scheduling to completion of this - * task in milliseconds. + * @return The duration of the time from scheduling to completion of this task + * in milliseconds. */ public synchronized long getRoundTripTime() { long result = this.elapsedMillisSinceSerialization; @@ -839,11 +813,11 @@ public synchronized long getRoundTripTime() { } /** - * Gets the number of milliseconds from the point in time from when this - * task was created until the time it was completed either successfully or - * with failures. If not yet completed then the number of milliseconds since - * the task was created is returned. If the task was a deserialized follow-up - * task then this may include the time spent in persistent storage. + * Gets the number of milliseconds from the point in time from when this task + * was created until the time it was completed either successfully or with + * failures. If not yet completed then the number of milliseconds since the task + * was created is returned. If the task was a deserialized follow-up task then + * this may include the time spent in persistent storage. 
* * @return The duration of the lifespan of this task in milliseconds. */ diff --git a/src/test/java/com/senzing/datamart/RabbitMqUriTest.java b/src/test/java/com/senzing/datamart/RabbitMqUriTest.java index b6fb458..23b8822 100644 --- a/src/test/java/com/senzing/datamart/RabbitMqUriTest.java +++ b/src/test/java/com/senzing/datamart/RabbitMqUriTest.java @@ -1312,9 +1312,11 @@ public void testEqualsAndHashWithVirtualHostAndQuery( uri2 = new RabbitMqUri( secure2, user2, password2, host2, virtualHost2, queryOptions2); - assertNotEquals(uri1, uri2, "Objects are unexpectedly not equal"); + assertNotEquals(uri1, uri2, "Objects are unexpectedly equal: " + + "uri1=[ " + uri1 + " ], uri2=[ " + uri2 + " ]"); assertNotEquals(uri1.hashCode(), uri2.hashCode(), - "Objects unexpectedly have different hash codes"); + "Objects unexpectedly have the same hash code: uri1=[ " + + uri1 + " ], uri2=[ " + uri2 + " ]"); } catch (Exception e) { fail("Failed test with exception.", e); diff --git a/src/test/java/com/senzing/datamart/SQLiteUriTest.java b/src/test/java/com/senzing/datamart/SQLiteUriTest.java index 762a3fd..30ad5a3 100644 --- a/src/test/java/com/senzing/datamart/SQLiteUriTest.java +++ b/src/test/java/com/senzing/datamart/SQLiteUriTest.java @@ -314,7 +314,7 @@ public void testParse(String unusedUser, String unusedPassword, File file, Map previousAffectedSet = null; - private int noOverlapCount = 0; - - private Set getAffectedSet(int minEntityId, - int maxEntityId, - int maxAffected) { - int idSpread = (maxEntityId - minEntityId); - Set affectedSet = new LinkedHashSet<>(); - int affectedCount = Math.max(1, PRNG.nextInt(maxAffected)); - for (int index2 = 0; index2 < affectedCount; index2++) { - long entityId = ((long) (minEntityId + PRNG.nextInt(idSpread))); - entityId = Math.min(entityId, (long) maxEntityId); - affectedSet.add(entityId); + private static SecureRandom PRNG = new SecureRandom(); + static { + double value = PRNG.nextDouble(); + } + private Set previousAffectedSet = 
null; + private int noOverlapCount = 0; + + private Set getAffectedSet(int minEntityId, int maxEntityId, int maxAffected) { + int idSpread = (maxEntityId - minEntityId); + Set affectedSet = new LinkedHashSet<>(); + int affectedCount = Math.max(1, PRNG.nextInt(maxAffected)); + for (int index2 = 0; index2 < affectedCount; index2++) { + long entityId = ((long) (minEntityId + PRNG.nextInt(idSpread))); + entityId = Math.min(entityId, (long) maxEntityId); + affectedSet.add(entityId); + } + + synchronized (this) { + if (previousAffectedSet != null) { + boolean overlap = false; + for (Long entityId : affectedSet) { + if (previousAffectedSet.contains(entityId)) { + overlap = true; + break; + } + } + noOverlapCount = (overlap) ? 0 : (noOverlapCount + 1); + + // check if we have had no contention in a while and force it if not + if (noOverlapCount > 20) { + // check if max size + if (affectedSet.size() == maxAffected) { + // remove the first if so + affectedSet.remove(affectedSet.iterator().next()); + } + // then add one from the previous to create overlap + affectedSet.add(previousAffectedSet.iterator().next()); + noOverlapCount = 0; + } + } + + // set the previous and return + previousAffectedSet = affectedSet; + } + return Collections.unmodifiableSet(affectedSet); } - synchronized (this) { - if (previousAffectedSet != null) { - boolean overlap = false; - for (Long entityId : affectedSet) { - if (previousAffectedSet.contains(entityId)) { - overlap = true; - break; - } - } - noOverlapCount = (overlap) ? 
0 : (noOverlapCount + 1); - - // check if we have had no contention in a while and force it if not - if (noOverlapCount > 20) { - // check if max size - if (affectedSet.size() == maxAffected) { - // remove the first if so - affectedSet.remove(affectedSet.iterator().next()); - } - // then add one from the previous to create overlap - affectedSet.add(previousAffectedSet.iterator().next()); - noOverlapCount = 0; - } - } - - // set the previous and return - previousAffectedSet = affectedSet; + private String getRecordId(int nextRecordId) { + return "RECORD-" + nextRecordId; } - return Collections.unmodifiableSet(affectedSet); - } - - private String getRecordId(int nextRecordId) { - return "RECORD-" + nextRecordId; - } - - private String getDataSource(List dataSources) { - int index = PRNG.nextInt(dataSources.size()); - index = Math.min(Math.max(0, index), dataSources.size() - 1); - return dataSources.get(index); - } - - public int buildInfoBatches(List messageList, - int batchCount, - List dataSources, - int minBatchSize, - int maxBatchSize, - int minEntityId, - int maxEntityid, - int maxAffected, - double failureRate) { - // fabricate record IDs - int nextRecordId = (int) Math.pow( - 10, (Math.floor(Math.log10(batchCount * maxBatchSize)) + 1)); - int count = 0; - // create the result list - for (int index = 0; index < batchCount; index++) { - boolean failure = PRNG.nextDouble() < failureRate; - int failureCount = 0; - if (failure) { - failureCount = PRNG.nextInt(3); - } - // determine the batch size - int batchSize = Math.max(1, minBatchSize + PRNG.nextInt(maxBatchSize)); - int messageId = nextRecordId; - if (batchSize == 1) { - count++; - JsonObjectBuilder job = Json.createObjectBuilder(); - buildInfoMessage(job, - messageId, - failureCount, - null, - getDataSource(dataSources), - getRecordId(nextRecordId++), - getAffectedSet(minEntityId, maxEntityid, maxAffected)); - JsonObject jsonObject = job.build(); - String messageText = toJsonText(jsonObject); - 
messageList.add(new Message(messageId, messageText)); - } else { - count += batchSize; + private String getDataSource(List dataSources) { + int index = PRNG.nextInt(dataSources.size()); + index = Math.min(Math.max(0, index), dataSources.size() - 1); + return dataSources.get(index); + } + + public int buildInfoBatches(List messageList, int batchCount, List dataSources, int minBatchSize, int maxBatchSize, int minEntityId, int maxEntityid, int maxAffected, double failureRate) { + // fabricate record IDs + int nextRecordId = (int) Math.pow(10, (Math.floor(Math.log10(batchCount * maxBatchSize)) + 1)); + int count = 0; + // create the result list + for (int index = 0; index < batchCount; index++) { + boolean failure = PRNG.nextDouble() < failureRate; + int failureCount = 0; + if (failure) { + failureCount = PRNG.nextInt(3); + } + // determine the batch size + int batchSize = Math.max(1, minBatchSize + PRNG.nextInt(maxBatchSize)); + int messageId = nextRecordId; + if (batchSize == 1) { + count++; + JsonObjectBuilder job = Json.createObjectBuilder(); + buildInfoMessage(job, messageId, failureCount, null, getDataSource(dataSources), + getRecordId(nextRecordId++), getAffectedSet(minEntityId, maxEntityid, maxAffected)); + JsonObject jsonObject = job.build(); + String messageText = toJsonText(jsonObject); + messageList.add(new Message(messageId, messageText)); + + } else { + count += batchSize; + JsonArrayBuilder jab = Json.createArrayBuilder(); + nextRecordId = buildInfoBatch(jab, batchSize, dataSources, nextRecordId, failureCount, minEntityId, + maxEntityid, maxAffected); + JsonArray jsonArray = jab.build(); + String messageText = toJsonText(jsonArray); + messageList.add(new Message(messageId, messageText)); + } + } + return count; + } + + public Message buildInfoBatch(int batchSize, List dataSources, int nextRecordId, int maxFailureCount, int minEntityId, int maxEntityId, int maxAffected) { JsonArrayBuilder jab = Json.createArrayBuilder(); - nextRecordId = 
buildInfoBatch(jab, - batchSize, - dataSources, - nextRecordId, - failureCount, - minEntityId, - maxEntityid, - maxAffected); + int messageId = nextRecordId; + buildInfoBatch(jab, batchSize, dataSources, nextRecordId, maxFailureCount, minEntityId, maxEntityId, + maxAffected); JsonArray jsonArray = jab.build(); String messageText = toJsonText(jsonArray); - messageList.add(new Message(messageId, messageText)); - } - } - return count; - } - - public Message buildInfoBatch(int batchSize, - List dataSources, - int nextRecordId, - int maxFailureCount, - int minEntityId, - int maxEntityId, - int maxAffected) { - JsonArrayBuilder jab = Json.createArrayBuilder(); - int messageId = nextRecordId; - buildInfoBatch(jab, - batchSize, - dataSources, - nextRecordId, - maxFailureCount, - minEntityId, - maxEntityId, - maxAffected); - JsonArray jsonArray = jab.build(); - String messageText = toJsonText(jsonArray); - return new Message(messageId, messageText); - } - - public int buildInfoBatch(JsonArrayBuilder builder, - int batchSize, - List dataSources, - int nextRecordId, - int maxFailureCount, - int minEntityId, - int maxEntityId, - int maxAffected) { - int messageId = nextRecordId; // all in the batch belong to same message - for (int index1 = 0; index1 < batchSize; index1++) { - JsonObjectBuilder job = Json.createObjectBuilder(); - int failureCount = (maxFailureCount == 0) - ? 0 - : PRNG.nextInt(maxFailureCount); - buildInfoMessage(job, - messageId, - failureCount, - null, - getDataSource(dataSources), - getRecordId(nextRecordId++), - getAffectedSet(minEntityId, maxEntityId, maxAffected)); - builder.add(job); + return new Message(messageId, messageText); } - return nextRecordId; - } - - public String buildInfoMessage(int messageId, - String dataSource, - String recordId, - long... 
affectedEntityIds) { - return buildInfoMessage(messageId, - 0, - null, - dataSource, - recordId, - affectedEntityIds); - } - - public String buildInfoMessage(int messageId, - int failureCount, - Long processingTime, - String dataSource, - String recordId, - long... affectedEntityIds) { - JsonObjectBuilder job = Json.createObjectBuilder(); - buildInfoMessage(job, - messageId, - failureCount, - processingTime, - dataSource, - recordId, - affectedEntityIds); - JsonObject jsonObject = job.build(); - return JsonUtilities.toJsonText(jsonObject); - } - - public String buildInfoMessage(int messageId, - String dataSource, - String recordId, - Set affectedEntityIds) { - return buildInfoMessage(messageId, - 0, - null, - dataSource, - recordId, - affectedEntityIds); - } - - public String buildInfoMessage(int messageId, - int failureCount, - Long processingTime, - String dataSource, - String recordId, - Set affectedEntityIds) { - JsonObjectBuilder job = Json.createObjectBuilder(); - buildInfoMessage(job, - messageId, - failureCount, - processingTime, - dataSource, - recordId, - affectedEntityIds); - JsonObject jsonObject = job.build(); - return JsonUtilities.toJsonText(jsonObject); - } - - public void buildInfoMessage(JsonObjectBuilder builder, - int messageId, - String dataSource, - String recordId, - long... affectedEntityIds) { - this.buildInfoMessage(builder, - messageId, - 0, - null, - dataSource, - recordId, - affectedEntityIds); - } - - public void buildInfoMessage(JsonObjectBuilder builder, - int messageId, - int failureCount, - Long processTime, - String dataSource, - String recordId, - long... 
affectedEntityIds) { - Set affectedSet = new LinkedHashSet<>(); - for (long entityId : affectedEntityIds) { - affectedSet.add(entityId); - } - this.buildInfoMessage(builder, - messageId, - failureCount, - processTime, - dataSource, - recordId, - affectedSet); - } - - public void buildInfoMessage(JsonObjectBuilder builder, - int messageId, - String dataSource, - String recordId, - Set affectedEntityIds) { - buildInfoMessage(builder, - messageId, - 0, - null, - dataSource, - recordId, - affectedEntityIds); - } - - public void buildInfoMessage(JsonObjectBuilder builder, - int messageId, - int failureCount, - Long processTime, - String dataSource, - String recordId, - Set affectedEntityIds) { - builder.add("MESSAGE_ID", messageId); - if (failureCount > 0) { - builder.add("FAILURE_COUNT", failureCount); + + public int buildInfoBatch(JsonArrayBuilder builder, int batchSize, List dataSources, int nextRecordId, int maxFailureCount, int minEntityId, int maxEntityId, int maxAffected) { + int messageId = nextRecordId; // all in the batch belong to same message + for (int index1 = 0; index1 < batchSize; index1++) { + JsonObjectBuilder job = Json.createObjectBuilder(); + int failureCount = (maxFailureCount == 0) ? 0 : PRNG.nextInt(maxFailureCount); + buildInfoMessage(job, messageId, failureCount, null, getDataSource(dataSources), + getRecordId(nextRecordId++), getAffectedSet(minEntityId, maxEntityId, maxAffected)); + builder.add(job); + } + return nextRecordId; } - if (processTime != null) { - builder.add("PROCESSING_TIME", processTime); + + public String buildInfoMessage(int messageId, String dataSource, String recordId, long... 
affectedEntityIds) { + return buildInfoMessage(messageId, 0, null, dataSource, recordId, affectedEntityIds); } - builder.add("DATA_SOURCE", dataSource); - builder.add("RECORD_ID", recordId); - JsonArrayBuilder jab = Json.createArrayBuilder(); - for (long entityId : affectedEntityIds) { - JsonObjectBuilder job2 = Json.createObjectBuilder(); - job2.add("ENTITY_ID", entityId); - job2.add("LENS_CODE", "DEFAULT"); - jab.add(job2); + + public String buildInfoMessage(int messageId, int failureCount, Long processingTime, String dataSource, String recordId, long... affectedEntityIds) { + JsonObjectBuilder job = Json.createObjectBuilder(); + buildInfoMessage(job, messageId, failureCount, processingTime, dataSource, recordId, affectedEntityIds); + JsonObject jsonObject = job.build(); + return JsonUtilities.toJsonText(jsonObject); } - builder.add("AFFECTED_ENTITIES", jab); - } - public static class Message { - private int id; - private String body; - private Long processingTime = null; + public String buildInfoMessage(int messageId, String dataSource, String recordId, Set affectedEntityIds) { + return buildInfoMessage(messageId, 0, null, dataSource, recordId, affectedEntityIds); + } - public Message(int id, String msgText) { - this(id, null, msgText); + public String buildInfoMessage(int messageId, int failureCount, Long processingTime, String dataSource, String recordId, Set affectedEntityIds) { + JsonObjectBuilder job = Json.createObjectBuilder(); + buildInfoMessage(job, messageId, failureCount, processingTime, dataSource, recordId, affectedEntityIds); + JsonObject jsonObject = job.build(); + return JsonUtilities.toJsonText(jsonObject); } - public Message(int id, Long processingTime, String msgText) { - this.id = id; - this.body = msgText; - this.processingTime = processingTime; + public void buildInfoMessage(JsonObjectBuilder builder, int messageId, String dataSource, String recordId, long... 
affectedEntityIds) { + this.buildInfoMessage(builder, messageId, 0, null, dataSource, recordId, affectedEntityIds); } - public int getId() { - return this.id; + public void buildInfoMessage(JsonObjectBuilder builder, int messageId, int failureCount, Long processTime, String dataSource, String recordId, long... affectedEntityIds) { + Set affectedSet = new LinkedHashSet<>(); + for (long entityId : affectedEntityIds) { + affectedSet.add(entityId); + } + this.buildInfoMessage(builder, messageId, failureCount, processTime, dataSource, recordId, affectedSet); } - public String getBody() { - return this.body; + public void buildInfoMessage(JsonObjectBuilder builder, int messageId, String dataSource, String recordId, Set affectedEntityIds) { + buildInfoMessage(builder, messageId, 0, null, dataSource, recordId, affectedEntityIds); } - public String toString() { - return "Message (" + this.getId() + "): " + this.getBody(); + public void buildInfoMessage(JsonObjectBuilder builder, int messageId, int failureCount, Long processTime, String dataSource, String recordId, Set affectedEntityIds) { + builder.add("MESSAGE_ID", messageId); + if (failureCount > 0) { + builder.add("FAILURE_COUNT", failureCount); + } + if (processTime != null) { + builder.add("PROCESSING_TIME", processTime); + } + builder.add("DATA_SOURCE", dataSource); + builder.add("RECORD_ID", recordId); + JsonArrayBuilder jab = Json.createArrayBuilder(); + for (long entityId : affectedEntityIds) { + JsonObjectBuilder job2 = Json.createObjectBuilder(); + job2.add("ENTITY_ID", entityId); + job2.add("LENS_CODE", "DEFAULT"); + jab.add(job2); + } + builder.add("AFFECTED_ENTITIES", jab); } - } + public static class Message { + private int id; + private String body; + private Long processingTime = null; - public static class RecordId { - private String dataSource; - private String recordId; + public Message(int id, String msgText) { + this(id, null, msgText); + } - public RecordId(String dataSource, String recordId) { - 
this.dataSource = dataSource; - this.recordId = recordId; - } + public Message(int id, Long processingTime, String msgText) { + this.id = id; + this.body = msgText; + this.processingTime = processingTime; + } - public String getDataSource() { - return this.dataSource; - } + public int getId() { + return this.id; + } - public String getRecordId() { - return this.recordId; - } + public String getBody() { + return this.body; + } - @Override - public boolean equals(Object o) { - if (this == o) - return true; - if (o == null || this.getClass() != o.getClass()) - return false; - RecordId that = (RecordId) o; - return Objects.equals(this.getDataSource(), that.getDataSource()) - && Objects.equals(this.getRecordId(), that.getRecordId()); - } + public String toString() { + return "Message (" + this.getId() + "): " + this.getBody(); + } - @Override - public int hashCode() { - return Objects.hash(getDataSource(), getRecordId()); } - @Override - public String toString() { - return this.getDataSource() + ":" + this.getRecordId(); - } - } - - public static class TestMessageConsumer - extends AbstractMessageConsumer { - private List messageQueue = new LinkedList<>(); - private IdentityHashMap dequeuedMap = new IdentityHashMap<>(); - private Thread consumptionThread = null; - private int dequeueCount; - private long dequeueSleep; - private long visibilityTimeout; - private long expectedFailureCount = 0L; - private long expectedMessageRetryCount = 0L; - private long expectedInfoMessageRetryCount = 0L; - - public TestMessageConsumer(int dequeueCount, - long dequeueSleep, - long visibilityTimeout, - List messages) { - this.dequeueCount = dequeueCount; - this.dequeueSleep = dequeueSleep; - this.visibilityTimeout = visibilityTimeout; - for (Message message : messages) { - this.messageQueue.add(message); - String body = message.getBody().trim(); - List jsonObjects = new ArrayList<>(); - if (body.startsWith("[")) { - JsonArray jsonArray = parseJsonArray(body); - for (JsonObject jsonObject 
: jsonArray.getValuesAs(JsonObject.class)) { - jsonObjects.add(jsonObject); - } - } else { - jsonObjects.add(parseJsonObject(body)); + public static class RecordId { + private String dataSource; + private String recordId; + + public RecordId(String dataSource, String recordId) { + this.dataSource = dataSource; + this.recordId = recordId; } - int maxFailures = 0; - for (JsonObject jsonObject : jsonObjects) { - int failureCount = getInteger(jsonObject, "FAILURE_COUNT", 0); - this.expectedFailureCount += failureCount; - if (failureCount > maxFailures) { - maxFailures = failureCount; - } + + public String getDataSource() { + return this.dataSource; } - this.expectedMessageRetryCount += maxFailures; - this.expectedInfoMessageRetryCount += (maxFailures * jsonObjects.size()); - } - } + public String getRecordId() { + return this.recordId; + } - public int getDequeueCount() { - return this.dequeueCount; - } + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || this.getClass() != o.getClass()) + return false; + RecordId that = (RecordId) o; + return Objects.equals(this.getDataSource(), that.getDataSource()) + && Objects.equals(this.getRecordId(), that.getRecordId()); + } - public long getDequeueSleep() { - return this.dequeueSleep; - } + @Override + public int hashCode() { + return Objects.hash(getDataSource(), getRecordId()); + } - public long getVisibilityTimeout() { - return this.visibilityTimeout; + @Override + public String toString() { + return this.getDataSource() + ":" + this.getRecordId(); + } } - public long getExpectedFailureCount() { - return this.expectedFailureCount; - } + public static class TestMessageConsumer extends AbstractMessageConsumer { + private List messageQueue = new LinkedList<>(); + private IdentityHashMap dequeuedMap = new IdentityHashMap<>(); + private Thread consumptionThread = null; + private int dequeueCount; + private long dequeueSleep; + private long visibilityTimeout; + private long 
expectedFailureCount = 0L; + private long expectedMessageRetryCount = 0L; + private long expectedInfoMessageRetryCount = 0L; + + public TestMessageConsumer(int dequeueCount, long dequeueSleep, long visibilityTimeout, List messages) { + this.dequeueCount = dequeueCount; + this.dequeueSleep = dequeueSleep; + this.visibilityTimeout = visibilityTimeout; + for (Message message : messages) { + this.messageQueue.add(message); + String body = message.getBody().trim(); + List jsonObjects = new ArrayList<>(); + if (body.startsWith("[")) { + JsonArray jsonArray = parseJsonArray(body); + for (JsonObject jsonObject : jsonArray.getValuesAs(JsonObject.class)) { + jsonObjects.add(jsonObject); + } + } else { + jsonObjects.add(parseJsonObject(body)); + } + int maxFailures = 0; + for (JsonObject jsonObject : jsonObjects) { + int failureCount = getInteger(jsonObject, "FAILURE_COUNT", 0); + this.expectedFailureCount += failureCount; + if (failureCount > maxFailures) { + maxFailures = failureCount; + } + } + this.expectedMessageRetryCount += maxFailures; + this.expectedInfoMessageRetryCount += (maxFailures * jsonObjects.size()); + } - public long getExpectedMessageRetryCount() { - return this.expectedMessageRetryCount; - } + } - public long getExpectedInfoMessageRetryCount() { - return this.expectedInfoMessageRetryCount; - } + public int getDequeueCount() { + return this.dequeueCount; + } - protected void doInit(JsonObject config) { - } + public long getDequeueSleep() { + return this.dequeueSleep; + } - protected void doDestroy() { - // join to the consumption thread - try { - this.consumptionThread.join(); - synchronized (this) { - this.consumptionThread = null; + public long getVisibilityTimeout() { + return this.visibilityTimeout; } - } catch (InterruptedException ignore) { - // ignore - } - } - protected void doConsume(MessageProcessor processor) { - this.consumptionThread = new Thread(() -> { - long start = System.nanoTime() - 15000000000L; - int timeoutCount = 0; - int 
restoreCount = 0; - while (this.getState() == CONSUMING) { - long end = System.nanoTime(); - if (((end - start) / 1000000L) > 10000L) { - start = end; - // if (timeoutCount > 0) { - // restoreCount += timeoutCount; - // System.err.println("RESTORED " + timeoutCount - // + " MESSAGES DUE TO VISIBILITY TIMEOUT " - // + "(" + restoreCount + " TOTAL)"); - // timeoutCount = 0; - // } - } - // dequeue messages - for (int index = 0; index < this.dequeueCount; index++) { - Message msg = null; - synchronized (this.messageQueue) { - if (this.messageQueue.size() == 0) - break; - msg = this.messageQueue.remove(0); - long now = System.nanoTime() / 1000000L; - this.dequeuedMap.put(msg, now); - } - this.enqueueMessages(processor, msg); - } - - // check for messages that have timed out and enqueue them again - synchronized (this.messageQueue) { - Iterator> iter = this.dequeuedMap.entrySet().iterator(); - while (iter.hasNext()) { - Map.Entry entry = iter.next(); - Message msg = entry.getKey(); - Long timestamp = entry.getValue(); - long now = System.nanoTime() / 1000000L; - if (now - timestamp > this.visibilityTimeout) { - iter.remove(); - timeoutCount++; - this.messageQueue.add(0, msg); - } - } - } - - // now sleep for a while - try { - Thread.sleep(this.dequeueSleep); - } catch (InterruptedException ignore) { - // ignore - } - } - // if (timeoutCount > 0) { - // restoreCount += timeoutCount; - // System.err.println("RESTORED " + timeoutCount - // + " MESSAGES DUE TO VISIBILITY TIMEOUT " - // + "(" + restoreCount + " TOTAL)"); - // } - }); - - this.consumptionThread.start(); - } + public long getExpectedFailureCount() { + return this.expectedFailureCount; + } - protected String extractMessageBody(Message msg) { - return msg.getBody(); - } + public long getExpectedMessageRetryCount() { + return this.expectedMessageRetryCount; + } - protected void disposeMessage(Message msg) { - synchronized (this.messageQueue) { - this.dequeuedMap.remove(msg); - } - } - } - - public static class 
MessageCounts - implements Cloneable, Comparable { - private String messageText; - private int beginCount = 0; - private int successCount = 0; - private int failureCount = 0; - private long firstBeginTime = 0L; - private long lastBeginTime = 0L; - private long lastEndTime = 0L; - private Integer messageId = null; - - public MessageCounts(String message) { - this.messageText = message; - try { - JsonObject jsonObject = parseJsonObject(this.messageText); - this.messageId = getInteger( - jsonObject, "MESSAGE_ID", null); - - } catch (Exception e) { - // allow for tests with bad JSON by having the MESSAGE_ID appear first - // in a stand-alone JSON object - try { - String firstLine = (new BufferedReader( - new StringReader(this.getMessageText()))).readLine(); - this.messageId = Integer.parseInt(firstLine.trim()); - } catch (Exception ignore) { - // do nothing + public long getExpectedInfoMessageRetryCount() { + return this.expectedInfoMessageRetryCount; } - } - } - public Object clone() { - try { - return super.clone(); - } catch (CloneNotSupportedException cannotHappen) { - throw new IllegalStateException("Unexpected clone failure"); - } - } + protected void doInit(JsonObject config) { + } - public int hashCode() { - synchronized (this) { - return Objects.hash( - this.getMessageId(), - this.getFirstBeginTime(), - this.getLastBeginTime(), - this.getLastEndTime(), - this.getBeginCount(), - this.getSuccessCount(), - this.getFailureCount()); - } - } + protected void doDestroy() { + // join to the consumption thread + try { + this.consumptionThread.join(); + synchronized (this) { + this.consumptionThread = null; + } + } catch (InterruptedException ignore) { + // ignore + } + } - public boolean equals(Object that) { - if (that == null) - return false; - if (this == that) - return true; - if (this.getClass() != that.getClass()) - return false; - MessageCounts counts = (MessageCounts) that; - int thisPriority = System.identityHashCode(this); - int thatPriority = 
System.identityHashCode(that); - MessageCounts first = (thisPriority < thatPriority) ? this : counts; - MessageCounts second = (thisPriority < thatPriority) ? counts : this; - synchronized (first) { - synchronized (second) { - return Objects.equals(this.getMessageId(), counts.getMessageId()) - && Objects.equals(this.getMessageText(), - counts.getMessageText()) - && Objects.equals(this.getFirstBeginTime(), - counts.getFirstBeginTime()) - && Objects.equals(this.getLastBeginTime(), - counts.getLastBeginTime()) - && Objects.equals(this.getLastEndTime(), - counts.getLastEndTime()) - && Objects.equals(this.getBeginCount(), - counts.getBeginCount()) - && Objects.equals(this.getSuccessCount(), - counts.getSuccessCount()) - && Objects.equals(this.getFailureCount(), - counts.getFailureCount()); - } - } - } + protected void doConsume(MessageProcessor processor) { + this.consumptionThread = new Thread(() -> { + long start = System.nanoTime() - 15000000000L; + int timeoutCount = 0; + int restoreCount = 0; + while (this.getState() == CONSUMING) { + long end = System.nanoTime(); + if (((end - start) / 1000000L) > 10000L) { + start = end; + // if (timeoutCount > 0) { + // restoreCount += timeoutCount; + // System.err.println("RESTORED " + timeoutCount + // + " MESSAGES DUE TO VISIBILITY TIMEOUT " + // + "(" + restoreCount + " TOTAL)"); + // timeoutCount = 0; + // } + } + // dequeue messages + for (int index = 0; index < this.dequeueCount; index++) { + Message msg = null; + synchronized (this.messageQueue) { + if (this.messageQueue.size() == 0) + break; + msg = this.messageQueue.remove(0); + long now = System.nanoTime() / 1000000L; + this.dequeuedMap.put(msg, now); + } + this.enqueueMessages(processor, msg); + } + + // check for messages that have timed out and enqueue them again + synchronized (this.messageQueue) { + Iterator> iter = this.dequeuedMap.entrySet().iterator(); + while (iter.hasNext()) { + Map.Entry entry = iter.next(); + Message msg = entry.getKey(); + Long timestamp 
= entry.getValue(); + long now = System.nanoTime() / 1000000L; + if (now - timestamp > this.visibilityTimeout) { + iter.remove(); + timeoutCount++; + this.messageQueue.add(0, msg); + } + } + } + + // now sleep for a while + try { + Thread.sleep(this.dequeueSleep); + } catch (InterruptedException ignore) { + // ignore + } + } + // if (timeoutCount > 0) { + // restoreCount += timeoutCount; + // System.err.println("RESTORED " + timeoutCount + // + " MESSAGES DUE TO VISIBILITY TIMEOUT " + // + "(" + restoreCount + " TOTAL)"); + // } + }); + + this.consumptionThread.start(); + } - public int compareTo(MessageCounts that) { - if (that == null) - return 1; - if (that == this) - return 0; - int thisPriority = System.identityHashCode(this); - int thatPriority = System.identityHashCode(that); - MessageCounts first = (thisPriority < thatPriority) ? this : that; - MessageCounts second = (thisPriority < thatPriority) ? that : this; - - synchronized (first) { - synchronized (second) { - long diff = this.getLastBeginTime() - that.getLastBeginTime(); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = this.getFirstBeginTime() - that.getFirstBeginTime(); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = this.getLastEndTime() - that.getLastEndTime(); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = (this.getMessageId() - that.getMessageId()); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = (this.getBeginCount() - that.getBeginCount()); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = (this.getSuccessCount() - that.getSuccessCount()); - if (diff != 0) - return (diff < 0) ? -1 : 1; - diff = (this.getFailureCount() - that.getFailureCount()); - if (diff != 0) - return (diff < 0) ? 
-1 : 1; - return (this.getMessageText().compareTo(that.getMessageText())); - } - } - } + protected String extractMessageBody(Message msg) { + return msg.getBody(); + } - public Integer getMessageId() { - return this.messageId; + protected void disposeMessage(Message msg) { + synchronized (this.messageQueue) { + this.dequeuedMap.remove(msg); + } + } } - public synchronized void recordBegin() { - this.beginCount++; - long now = System.nanoTime(); - if (this.firstBeginTime == 0) - this.firstBeginTime = now; - this.lastBeginTime = now; - } + public static class MessageCounts implements Cloneable, Comparable { + private String messageText; + private int beginCount = 0; + private int successCount = 0; + private int failureCount = 0; + private long firstBeginTime = 0L; + private long lastBeginTime = 0L; + private long lastEndTime = 0L; + private Integer messageId = null; + + public MessageCounts(String message) { + this.messageText = message; + try { + JsonObject jsonObject = parseJsonObject(this.messageText); + this.messageId = getInteger(jsonObject, "MESSAGE_ID", null); + + } catch (Exception e) { + // allow for tests with bad JSON by having the MESSAGE_ID appear first + // in a stand-alone JSON object + try { + String firstLine = (new BufferedReader(new StringReader(this.getMessageText()))).readLine(); + this.messageId = Integer.parseInt(firstLine.trim()); + } catch (Exception ignore) { + // do nothing + } + } + } - public synchronized void recordSuccess() { - this.successCount++; - this.lastEndTime = System.nanoTime(); - } + public Object clone() { + try { + return super.clone(); + } catch (CloneNotSupportedException cannotHappen) { + throw new IllegalStateException("Unexpected clone failure"); + } + } - public synchronized void recordFailure() { - this.failureCount++; - this.lastEndTime = System.nanoTime(); - } + public int hashCode() { + synchronized (this) { + return Objects.hash(this.getMessageId(), this.getFirstBeginTime(), this.getLastBeginTime(), + 
this.getLastEndTime(), this.getBeginCount(), this.getSuccessCount(), this.getFailureCount()); + } + } - public String getMessageText() { - return this.messageText; - } + public boolean equals(Object that) { + if (that == null) + return false; + if (this == that) + return true; + if (this.getClass() != that.getClass()) + return false; + MessageCounts counts = (MessageCounts) that; + int thisPriority = System.identityHashCode(this); + int thatPriority = System.identityHashCode(that); + MessageCounts first = (thisPriority < thatPriority) ? this : counts; + MessageCounts second = (thisPriority < thatPriority) ? counts : this; + synchronized (first) { + synchronized (second) { + return Objects.equals(this.getMessageId(), counts.getMessageId()) + && Objects.equals(this.getMessageText(), counts.getMessageText()) + && Objects.equals(this.getFirstBeginTime(), counts.getFirstBeginTime()) + && Objects.equals(this.getLastBeginTime(), counts.getLastBeginTime()) + && Objects.equals(this.getLastEndTime(), counts.getLastEndTime()) + && Objects.equals(this.getBeginCount(), counts.getBeginCount()) + && Objects.equals(this.getSuccessCount(), counts.getSuccessCount()) + && Objects.equals(this.getFailureCount(), counts.getFailureCount()); + } + } + } - public synchronized int getBeginCount() { - return this.beginCount; - } + public int compareTo(MessageCounts that) { + if (that == null) + return 1; + if (that == this) + return 0; + int thisPriority = System.identityHashCode(this); + int thatPriority = System.identityHashCode(that); + MessageCounts first = (thisPriority < thatPriority) ? this : that; + MessageCounts second = (thisPriority < thatPriority) ? that : this; + + synchronized (first) { + synchronized (second) { + long diff = this.getLastBeginTime() - that.getLastBeginTime(); + if (diff != 0) + return (diff < 0) ? -1 : 1; + diff = this.getFirstBeginTime() - that.getFirstBeginTime(); + if (diff != 0) + return (diff < 0) ? 
-1 : 1; + diff = this.getLastEndTime() - that.getLastEndTime(); + if (diff != 0) + return (diff < 0) ? -1 : 1; + diff = (this.getMessageId() - that.getMessageId()); + if (diff != 0) + return (diff < 0) ? -1 : 1; + diff = (this.getBeginCount() - that.getBeginCount()); + if (diff != 0) + return (diff < 0) ? -1 : 1; + diff = (this.getSuccessCount() - that.getSuccessCount()); + if (diff != 0) + return (diff < 0) ? -1 : 1; + diff = (this.getFailureCount() - that.getFailureCount()); + if (diff != 0) + return (diff < 0) ? -1 : 1; + return (this.getMessageText().compareTo(that.getMessageText())); + } + } + } - public synchronized int getSuccessCount() { - return this.successCount; - } + public Integer getMessageId() { + return this.messageId; + } - public synchronized int getFailureCount() { - return this.failureCount; - } + public synchronized void recordBegin() { + this.beginCount++; + long now = System.nanoTime(); + if (this.firstBeginTime == 0) + this.firstBeginTime = now; + this.lastBeginTime = now; + } - public synchronized long getFirstBeginTime() { - return this.firstBeginTime; - } + public synchronized void recordSuccess() { + this.successCount++; + this.lastEndTime = System.nanoTime(); + } - public synchronized long getLastBeginTime() { - return this.lastBeginTime; - } + public synchronized void recordFailure() { + this.failureCount++; + this.lastEndTime = System.nanoTime(); + } - public synchronized long getLastEndTime() { - return this.lastEndTime; - } + public String getMessageText() { + return this.messageText; + } - public static String toString(Collection countsList) { - StringWriter sw = new StringWriter(); - PrintWriter pw = new PrintWriter(sw); - pw.println(); - for (MessageCounts counts : countsList) { - pw.println(" " + counts); - } - pw.println(); - pw.flush(); - return sw.toString(); - } + public synchronized int getBeginCount() { + return this.beginCount; + } - public String toString() { - synchronized (this) { - return "MESSAGE (" + 
this.getMessageId() - + "): begin=[ " + this.getBeginCount() + " / " - + this.getFirstBeginTime() + " / " + this.getLastBeginTime() - + " ], success=[ " + this.getSuccessCount() - + " ], failed=[ " + this.getFailureCount() - + " ], lastEndTime=[ " + this.getLastEndTime() + " ]"; - } - } - } - - public static class TestService extends AbstractListenerService { - private static final Map ACTION_MAP = Map.of( - RECORD, "RECORD", AFFECTED_ENTITY, "ENTITY"); - - private static final ThreadLocal MESSAGE_COUNTS = new ThreadLocal<>(); - - private long minProcessingTime = 10L; - private long maxProcessingTime = 60L; - private List failures = new LinkedList<>(); - private Map tasksByEntity = new LinkedHashMap<>(); - private Map countsByMessage = new LinkedHashMap<>(); - private double failureRate = 0.0; - private int handlingCount = 0; - private int processingCount = 0; - private boolean aborted = false; - - public TestService() { - super(ACTION_MAP); - } + public synchronized int getSuccessCount() { + return this.successCount; + } - public TestService(long processingTime, double failureRate) { - this(processingTime, processingTime, failureRate); - } + public synchronized int getFailureCount() { + return this.failureCount; + } - public TestService(long minProcessingTime, - long maxProcessingTime, - double failureRate) { - super(ACTION_MAP); - this.minProcessingTime = minProcessingTime; - this.maxProcessingTime = maxProcessingTime; - this.failureRate = failureRate; - } + public synchronized long getFirstBeginTime() { + return this.firstBeginTime; + } - private synchronized void logFailure(Exception e) { - e.printStackTrace(); - this.failures.add(e); - } + public synchronized long getLastBeginTime() { + return this.lastBeginTime; + } - public synchronized List getFailures() { - return new ArrayList<>(this.failures); - } + public synchronized long getLastEndTime() { + return this.lastEndTime; + } - public synchronized Map getMessageCounts() { - Map result = new 
LinkedHashMap<>(); - this.countsByMessage.values().forEach((counts) -> { - MessageCounts clone = (MessageCounts) counts.clone(); - result.put(clone.getMessageId(), clone); - }); - return result; - } + public static String toString(Collection countsList) { + StringWriter sw = new StringWriter(); + PrintWriter pw = new PrintWriter(sw); + pw.println(); + for (MessageCounts counts : countsList) { + pw.println(" " + counts); + } + pw.println(); + pw.flush(); + return sw.toString(); + } - public SchedulingService getSchedulingService() { - return super.getSchedulingService(); + public String toString() { + synchronized (this) { + return "MESSAGE (" + this.getMessageId() + "): begin=[ " + this.getBeginCount() + " / " + + this.getFirstBeginTime() + " / " + this.getLastBeginTime() + " ], success=[ " + + this.getSuccessCount() + " ], failed=[ " + this.getFailureCount() + " ], lastEndTime=[ " + + this.getLastEndTime() + " ]"; + } + } } - public synchronized int getSuccessCount() { - int successCount = 0; - for (MessageCounts counts : this.countsByMessage.values()) { - successCount += (counts.getSuccessCount() > 0) ? 
1 : 0; - } - return successCount; - } + public static class TestService extends AbstractListenerService { + private static final Map ACTION_MAP = Map.of(RECORD, "RECORD", AFFECTED_ENTITY, "ENTITY"); - public synchronized boolean isProcessing() { - return (this.processingCount > 0); - } + private static final ThreadLocal MESSAGE_COUNTS = new ThreadLocal<>(); + + private long minProcessingTime = 10L; + private long maxProcessingTime = 60L; + private List failures = new LinkedList<>(); + private Map tasksByEntity = new LinkedHashMap<>(); + private Map countsByMessage = new LinkedHashMap<>(); + private double failureRate = 0.0; + private int handlingCount = 0; + private int processingCount = 0; + private boolean aborted = false; - public synchronized void awaitSuccess(TestMessageConsumer consumer, - int minSuccessCount, - ConnectionPool pool) { - long start = System.nanoTime() / 1000000L; - int successCount = this.getSuccessCount(); - boolean processing = this.isProcessing(); - while ((successCount < minSuccessCount || processing) && !this.aborted) { - long now = System.nanoTime() / 1000000L; - if ((now - start) > 10000L) { - start = now; - // printStatistics(consumer, this, pool); + public TestService() { + super(ACTION_MAP); } - try { - this.wait(this.maxProcessingTime); - } catch (InterruptedException ignore) { - // ignore + public TestService(long processingTime, double failureRate) { + this(processingTime, processingTime, failureRate); } - successCount = this.getSuccessCount(); - processing = this.isProcessing(); - } - } - private synchronized void beginHandling(String action, - Map parameters, - int multiplicity, - String taskAsJson) { - this.handlingCount++; - if (this.aborted) - return; - Object key = null; - switch (action) { - case "RECORD": - key = new RecordId( - parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(), - parameters.get(RECORD_ID_PARAMETER_KEY).toString()); - break; - case "ENTITY": - key = parameters.get(ENTITY_ID_PARAMETER_KEY); - break; - 
case "DATA_SOURCE_COUNT": - key = parameters.get(DATA_SOURCE_PARAMETER_KEY); - break; - default: - key = null; - } - - if (key != null) { - if (this.tasksByEntity.containsKey(key)) { - this.aborted = true; - ProcessScopeLockingService lockingService = (ProcessScopeLockingService) this.getSchedulingService() - .getLockingService(); - lockingService.dumpLocks(); - - throw new IllegalStateException( - "Simultaneous processing of the same resource (" + key + "). " - + "inProgress=[ " + this.tasksByEntity.get(key) - + " ], conflicting=[ " + taskAsJson + " ]"); - } - this.tasksByEntity.put(key, taskAsJson); - } - } + public TestService(long minProcessingTime, long maxProcessingTime, double failureRate) { + super(ACTION_MAP); + this.minProcessingTime = minProcessingTime; + this.maxProcessingTime = maxProcessingTime; + this.failureRate = failureRate; + } - private synchronized MessageCounts beginProcessing(JsonObject message, - String jsonText) { - this.processingCount++; - if (this.aborted) - return null; - MessageCounts counts = this.countsByMessage.get(jsonText); - if (counts == null) { - counts = new MessageCounts(jsonText); - this.countsByMessage.put(jsonText, counts); - } - counts.recordBegin(); - MESSAGE_COUNTS.set(counts); - return counts; - } + private synchronized void logFailure(Exception e) { + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + this.failures.add(e); + } - private synchronized boolean isAborted() { - return this.aborted; - } + public synchronized List getFailures() { + return new ArrayList<>(this.failures); + } - private synchronized void endHandling(String action, - Map parameters, - int multiplicity, - String taskAsJson) { - this.handlingCount--; - if (this.aborted) - return; - Object key = null; - switch (action) { - case "RECORD": - key = new RecordId( - parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(), - parameters.get(RECORD_ID_PARAMETER_KEY).toString()); - break; - case "ENTITY": - key = 
parameters.get(ENTITY_ID_PARAMETER_KEY); - break; - default: - key = null; - } - - if (key != null) { - String existing = this.tasksByEntity.get(key); - if (existing == null) { - this.aborted = true; - throw new IllegalStateException( - "Resource (" + key + ") was not marked for handling: " - + taskAsJson); - } - if (!existing.equals(taskAsJson)) { - this.aborted = true; - throw new IllegalStateException( - "Resource (" + key + ") was associated with another " - + "message. expected=[ " + taskAsJson + " ], found=[ " - + existing + " ]"); - } - - // remove the resource key - this.tasksByEntity.remove(key); - } - - this.notifyAll(); - } + public synchronized Map getMessageCounts() { + Map result = new LinkedHashMap<>(); + this.countsByMessage.values().forEach((counts) -> { + MessageCounts clone = (MessageCounts) counts.clone(); + result.put(clone.getMessageId(), clone); + }); + return result; + } - private synchronized MessageCounts endProcessing(JsonObject jsonObject, - String jsonText, - boolean success) { - this.processingCount--; - if (this.aborted) - return null; - - MessageCounts counts = this.countsByMessage.get(jsonText); - if (counts == null) { - this.aborted = true; - throw new IllegalStateException( - "Missing message counts for message: " + jsonText); - } - if (success) - counts.recordSuccess(); - else - counts.recordFailure(); - this.notifyAll(); - MESSAGE_COUNTS.set(null); - return counts; - } + public SchedulingService getSchedulingService() { + return super.getSchedulingService(); + } - @Override - protected void doInit(JsonObject config) { - // do nothing - } + public synchronized int getSuccessCount() { + int successCount = 0; + for (MessageCounts counts : this.countsByMessage.values()) { + successCount += (counts.getSuccessCount() > 0) ? 
1 : 0; + } + return successCount; + } - @Override - public void process(JsonObject message) throws ServiceExecutionException { - String jsonText = JsonUtilities.toJsonText(message); - try { - MessageCounts counts = this.beginProcessing(message, jsonText); - boolean success = true; - try { - super.process(message); + public synchronized boolean isProcessing() { + return (this.processingCount > 0); + } - } catch (ServiceExecutionException e) { - success = false; - throw e; + public synchronized void awaitSuccess(TestMessageConsumer consumer, int minSuccessCount, ConnectionPool pool) { + long start = System.nanoTime() / 1000000L; + int successCount = this.getSuccessCount(); + boolean processing = this.isProcessing(); + while ((successCount < minSuccessCount || processing) && !this.aborted) { + long now = System.nanoTime() / 1000000L; + if ((now - start) > 10000L) { + start = now; + // printStatistics(consumer, this, pool); + } + try { + this.wait(this.maxProcessingTime); + + } catch (InterruptedException ignore) { + // ignore + } + successCount = this.getSuccessCount(); + processing = this.isProcessing(); + } + } - } catch (Exception e) { - this.logFailure(e); - success = false; + private synchronized void beginHandling(String action, Map parameters, int multiplicity, String taskAsJson) { + this.handlingCount++; + if (this.aborted) + return; + Object key = null; + switch (action) { + case "RECORD": + key = new RecordId(parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(), + parameters.get(RECORD_ID_PARAMETER_KEY).toString()); + break; + case "ENTITY": + key = parameters.get(ENTITY_ID_PARAMETER_KEY); + break; + case "DATA_SOURCE_COUNT": + key = parameters.get(DATA_SOURCE_PARAMETER_KEY); + break; + default: + key = null; + } - } finally { - this.endProcessing(message, jsonText, success); + if (key != null) { + if (this.tasksByEntity.containsKey(key)) { + this.aborted = true; + ProcessScopeLockingService lockingService = (ProcessScopeLockingService) 
this.getSchedulingService() + .getLockingService(); + lockingService.dumpLocks(); + + throw new IllegalStateException("Simultaneous processing of the same resource (" + key + "). " + + "inProgress=[ " + this.tasksByEntity.get(key) + " ], conflicting=[ " + taskAsJson + " ]"); + } + this.tasksByEntity.put(key, taskAsJson); + } } - } catch (ServiceExecutionException e) { - // rethrow the simulated failure - throw e; + private synchronized MessageCounts beginProcessing(JsonObject message, String jsonText) { + this.processingCount++; + if (this.aborted) + return null; + MessageCounts counts = this.countsByMessage.get(jsonText); + if (counts == null) { + counts = new MessageCounts(jsonText); + this.countsByMessage.put(jsonText, counts); + } + counts.recordBegin(); + MESSAGE_COUNTS.set(counts); + return counts; + } - } catch (Exception e) { - this.logFailure(e); - if (!this.isAborted()) { - throw new ServiceExecutionException(e); + private synchronized boolean isAborted() { + return this.aborted; } - } - } - @Override - protected void scheduleTasks(JsonObject message, Scheduler scheduler) - throws ServiceExecutionException { - super.scheduleTasks(message, scheduler); - - // check for a forced failure - MessageCounts counts = MESSAGE_COUNTS.get(); - int maxFailures = getInteger(message, "FAILURE_COUNT", 0); - int failureCount = counts.getFailureCount(); - if (maxFailures > 0 && failureCount < maxFailures) { - scheduler.createTaskBuilder("FORCED_FAILURE") - .parameter("failureCount", failureCount) - .parameter("maxFailures", maxFailures) - .parameter("message", toJsonText(message)) - .schedule(false); - } + private synchronized void endHandling(String action, Map parameters, int multiplicity, String taskAsJson) { + this.handlingCount--; + if (this.aborted) + return; + Object key = null; + switch (action) { + case "RECORD": + key = new RecordId(parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(), + parameters.get(RECORD_ID_PARAMETER_KEY).toString()); + break; + case 
"ENTITY": + key = parameters.get(ENTITY_ID_PARAMETER_KEY); + break; + default: + key = null; + } - } + if (key != null) { + String existing = this.tasksByEntity.get(key); + if (existing == null) { + this.aborted = true; + throw new IllegalStateException( + "Resource (" + key + ") was not marked for handling: " + taskAsJson); + } + if (!existing.equals(taskAsJson)) { + this.aborted = true; + throw new IllegalStateException("Resource (" + key + ") was associated with another " + + "message. expected=[ " + taskAsJson + " ], found=[ " + existing + " ]"); + } + + // remove the resource key + this.tasksByEntity.remove(key); + } - @Override - protected void handleTask(String action, - Map parameters, - int multiplicity, - Scheduler followUpScheduler) - throws ServiceExecutionException { - String jsonText = this.taskAsJson(action, parameters, multiplicity); - this.beginHandling(action, parameters, multiplicity, jsonText); - - try { - // check if we are dealing with a forced-failure - if ("FORCED_FAILURE".equals(action)) { - int failureCount = (Integer) parameters.get("failureCount"); - int maxFailures = (Integer) parameters.get("maxFailures"); - String message = (String) parameters.get("message"); - throw new ServiceExecutionException( - "Simulated failure (" + failureCount + " of " + maxFailures - + ") for message: " + message); - } - - // otherwise sleep for a period of time possibly with a random failure - long range = this.maxProcessingTime - this.minProcessingTime; - double percentage = PRNG.nextDouble(); - long processingTime = this.minProcessingTime - + ((long) (percentage * (double) range)); - boolean failure = PRNG.nextDouble() < this.failureRate; - try { - Thread.sleep(processingTime); - } catch (InterruptedException ignore) { - // do nothing + this.notifyAll(); } - if (failure) { - throw new ServiceExecutionException( - "Simulated random failure for task: " + jsonText); + private synchronized MessageCounts endProcessing(JsonObject jsonObject, String jsonText, 
boolean success) { + this.processingCount--; + if (this.aborted) + return null; + + MessageCounts counts = this.countsByMessage.get(jsonText); + if (counts == null) { + this.aborted = true; + throw new IllegalStateException("Missing message counts for message: " + jsonText); + } + if (success) + counts.recordSuccess(); + else + counts.recordFailure(); + this.notifyAll(); + MESSAGE_COUNTS.set(null); + return counts; } - if ("RECORD".equals(action) && (followUpScheduler != null) - && (PRNG.nextDouble() < 0.50)) { - String dataSource = parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(); + @Override + protected void doInit(JsonObject config) { + // do nothing + } - // schedule a follow-up task - followUpScheduler.createTaskBuilder("INCREMENT_RECORD_COUNT") - .parameter(DATA_SOURCE_PARAMETER_KEY, dataSource) - .resource("DATA_SOURCE", dataSource) - .schedule(); + @Override + public void process(JsonObject message) throws ServiceExecutionException { + String jsonText = JsonUtilities.toJsonText(message); + try { + MessageCounts counts = this.beginProcessing(message, jsonText); + boolean success = true; + try { + super.process(message); + + } catch (ServiceExecutionException e) { + success = false; + throw e; + + } catch (Exception e) { + this.logFailure(e); + success = false; + + } finally { + this.endProcessing(message, jsonText, success); + } + + } catch (ServiceExecutionException e) { + // rethrow the simulated failure + throw e; + + } catch (Exception e) { + this.logFailure(e); + if (!this.isAborted()) { + throw new ServiceExecutionException(e); + } + } + } + + @Override + protected void scheduleTasks(JsonObject message, Scheduler scheduler) throws ServiceExecutionException { + super.scheduleTasks(message, scheduler); + + // check for a forced failure + MessageCounts counts = MESSAGE_COUNTS.get(); + int maxFailures = getInteger(message, "FAILURE_COUNT", 0); + int failureCount = counts.getFailureCount(); + if (maxFailures > 0 && failureCount < maxFailures) { + 
scheduler.createTaskBuilder("FORCED_FAILURE").parameter("failureCount", failureCount) + .parameter("maxFailures", maxFailures).parameter("message", toJsonText(message)) + .schedule(false); + } - followUpScheduler.commit(); } - } finally { - this.endHandling(action, parameters, multiplicity, jsonText); - } - } + @Override + protected void handleTask(String action, Map parameters, int multiplicity, Scheduler followUpScheduler) throws ServiceExecutionException { + String jsonText = this.taskAsJson(action, parameters, multiplicity); + this.beginHandling(action, parameters, multiplicity, jsonText); + + try { + // check if we are dealing with a forced-failure + if ("FORCED_FAILURE".equals(action)) { + int failureCount = (Integer) parameters.get("failureCount"); + int maxFailures = (Integer) parameters.get("maxFailures"); + String message = (String) parameters.get("message"); + throw new ServiceExecutionException( + "Simulated failure (" + failureCount + " of " + maxFailures + ") for message: " + message); + } + + // otherwise sleep for a period of time possibly with a random failure + long range = this.maxProcessingTime - this.minProcessingTime; + double percentage = PRNG.nextDouble(); + long processingTime = this.minProcessingTime + ((long) (percentage * (double) range)); + boolean failure = PRNG.nextDouble() < this.failureRate; + try { + Thread.sleep(processingTime); + } catch (InterruptedException ignore) { + // do nothing + } + + if (failure) { + throw new ServiceExecutionException("Simulated random failure for task: " + jsonText); + } + + if ("RECORD".equals(action) && (followUpScheduler != null) && (PRNG.nextDouble() < 0.50)) { + String dataSource = parameters.get(DATA_SOURCE_PARAMETER_KEY).toString(); + + // schedule a follow-up task + followUpScheduler.createTaskBuilder("INCREMENT_RECORD_COUNT") + .parameter(DATA_SOURCE_PARAMETER_KEY, dataSource).resource("DATA_SOURCE", dataSource) + .schedule(); + + followUpScheduler.commit(); + } + + } finally { + 
this.endHandling(action, parameters, multiplicity, jsonText); + } + } - @Override - public void doDestroy() { - // do nothing - } - } - - @ParameterizedTest - @ValueSource(ints = { 1, 2, 3, 4, 8 }) - public void basicTest(int concurrency) { - List messages = new LinkedList<>(); - messages.add(new Message(1, buildInfoMessage(1, - "CUSTOMERS", - "001", - 1, 2, 3))); - messages.add(new Message(2, buildInfoMessage(2, - "CUSTOMERS", - "002", - 1, 4))); - messages.add(new Message(3, buildInfoMessage(3, - "CUSTOMERS", - "003", - 2, 5))); - messages.add(new Message(4, buildInfoMessage(4, - "CUSTOMERS", - "004", - 4, 5))); - messages.add(new Message(5, buildInfoMessage(5, - "CUSTOMERS", - "005", - 6, 7))); - - this.performTest(messages, - messages.size(), - concurrency, - null, - null, - null, - null, - null, - 0.0, - null); - } - - @ParameterizedTest - @ValueSource(ints = { 1, 2, 3, 4, 8 }) - public void errantTest(int concurrency) { - List messages = new LinkedList<>(); - messages.add(new Message(1, buildInfoMessage(1, - "CUSTOMERS", - "001", - 1, 2, 3))); - messages.add(new Message(2, buildInfoMessage(2, - 1, - null, - "CUSTOMERS", - "002", - 1, 4))); - messages.add(new Message(3, buildInfoMessage(3, - "CUSTOMERS", - "003", - 2, 5))); - messages.add(new Message(4, buildInfoMessage(4, - 1, - null, - "CUSTOMERS", - "004", - 4, 5))); - messages.add(new Message(5, buildInfoMessage(5, - "CUSTOMERS", - "005", - 6, 7))); - - this.performTest(messages, - messages.size(), - concurrency, - null, - null, - 2500L, - null, - null, - 0.0, - null); - } - - @ParameterizedTest - @ValueSource(ints = { 8, 16, 24 }) - public void loadTest(int concurrency) { - List batches = new LinkedList<>(); - int messageCount = buildInfoBatches( - batches, - 2000, - List.of("CUSTOMERS", "EMPLOYEES", "VENDORS"), - 1, - 10, - 1000, - 3000, - 4, - 0.005); - - System.err.println(); - System.err.println("====================================================="); - System.err.println("Testing " + batches.size() 
+ " batches comprising " - + messageCount + " messages with concurrency of " - + concurrency + "."); - - long start = System.nanoTime() / 1000000L; - this.performTest(batches, - messageCount, - concurrency, - 30, - 50L, - 5000L, - 2L, - 5L, - 0.0, - null); - long duration = (System.nanoTime() / 1000000L) - start; - System.err.println("TOTAL TIME: " + (duration) + " ms"); - } - - protected void performTest(List messages, - int messageCount, - Integer concurrency, - Integer dequeueCount, - Long dequeueSleep, - Long visibilityTimeout, - Long minProcessingTime, - Long maxProcessingTime, - Double failureRate, - Map> orderAfterMap) { - StringBuilder sb = new StringBuilder(); - String prefix = ""; - if (concurrency != null) { - sb.append(prefix); - sb.append("concurrency=[ " + concurrency + " ]"); - prefix = ", "; - } - if (dequeueCount == null) { - dequeueCount = 2; - } else { - sb.append(prefix); - sb.append("dequeueCount=[ " + dequeueCount + " ]"); - prefix = ", "; - } - if (dequeueSleep == null) { - dequeueSleep = 25L; - } else { - sb.append(prefix); - sb.append("dequeueSleep=[ " + dequeueSleep + " ]"); - prefix = ", "; - } - if (visibilityTimeout == null) { - visibilityTimeout = 12500L; - } else { - sb.append(prefix); - sb.append("visibilityTimeout=[ " + visibilityTimeout + " ]"); - prefix = ", "; - } - if (minProcessingTime == null) { - minProcessingTime = 75L; - } else { - sb.append(prefix); - sb.append("minProcessingTime=[ " + minProcessingTime + " ]"); - prefix = ", "; + @Override + public void doDestroy() { + // do nothing + } } - if (maxProcessingTime == null) { - maxProcessingTime = minProcessingTime; - } else { - sb.append(prefix); - sb.append("maxProcessingTime=[ " + maxProcessingTime + " ]"); - prefix = ", "; + + @ParameterizedTest + @ValueSource(ints = { 1, 2, 3, 4, 8 }) + public void basicTest(int concurrency) { + List messages = new LinkedList<>(); + messages.add(new Message(1, buildInfoMessage(1, "CUSTOMERS", "001", 1, 2, 3))); + messages.add(new 
Message(2, buildInfoMessage(2, "CUSTOMERS", "002", 1, 4))); + messages.add(new Message(3, buildInfoMessage(3, "CUSTOMERS", "003", 2, 5))); + messages.add(new Message(4, buildInfoMessage(4, "CUSTOMERS", "004", 4, 5))); + messages.add(new Message(5, buildInfoMessage(5, "CUSTOMERS", "005", 6, 7))); + + this.performTest(messages, messages.size(), concurrency, null, null, null, null, null, 0.0, null); } - if (failureRate == null) { - failureRate = 0.0; - } else { - sb.append(prefix); - sb.append("failureRate=[ " + failureRate + " ]"); - prefix = ", "; + + @ParameterizedTest + @ValueSource(ints = { 1, 2, 3, 4, 8 }) + public void errantTest(int concurrency) { + List messages = new LinkedList<>(); + messages.add(new Message(1, buildInfoMessage(1, "CUSTOMERS", "001", 1, 2, 3))); + messages.add(new Message(2, buildInfoMessage(2, 1, null, "CUSTOMERS", "002", 1, 4))); + messages.add(new Message(3, buildInfoMessage(3, "CUSTOMERS", "003", 2, 5))); + messages.add(new Message(4, buildInfoMessage(4, 1, null, "CUSTOMERS", "004", 4, 5))); + messages.add(new Message(5, buildInfoMessage(5, "CUSTOMERS", "005", 6, 7))); + + this.performTest(messages, messages.size(), concurrency, null, null, 2500L, null, null, 0.0, null); } - String testInfo = sb.toString(); - TestMessageConsumer consumer = new TestMessageConsumer( - dequeueCount, dequeueSleep, visibilityTimeout, messages); + @ParameterizedTest + @ValueSource(ints = { 8, 16, 24 }) + public void loadTest(int concurrency) { + List batches = new LinkedList<>(); + int messageCount = buildInfoBatches(batches, 2000, List.of("CUSTOMERS", "EMPLOYEES", "VENDORS"), 1, 10, 1000, + 3000, 4, 0.005); - TestService service = new TestService( - minProcessingTime, maxProcessingTime, failureRate); + System.err.println(); + System.err.println("====================================================="); + System.err.println("Testing " + batches.size() + " batches comprising " + messageCount + + " messages with concurrency of " + concurrency + "."); + + long 
start = System.nanoTime() / 1000000L; + this.performTest(batches, messageCount, concurrency, 30, 50L, 5000L, 2L, 5L, 0.0, null); + long duration = (System.nanoTime() / 1000000L) - start; + System.err.println("TOTAL TIME: " + (duration) + " ms"); + } + + protected void performTest(List messages, int messageCount, Integer concurrency, Integer dequeueCount, Long dequeueSleep, Long visibilityTimeout, Long minProcessingTime, Long maxProcessingTime, Double failureRate, Map> orderAfterMap) { + StringBuilder sb = new StringBuilder(); + String prefix = ""; + if (concurrency != null) { + sb.append(prefix); + sb.append("concurrency=[ " + concurrency + " ]"); + prefix = ", "; + } + if (dequeueCount == null) { + dequeueCount = 2; + } else { + sb.append(prefix); + sb.append("dequeueCount=[ " + dequeueCount + " ]"); + prefix = ", "; + } + if (dequeueSleep == null) { + dequeueSleep = 25L; + } else { + sb.append(prefix); + sb.append("dequeueSleep=[ " + dequeueSleep + " ]"); + prefix = ", "; + } + if (visibilityTimeout == null) { + visibilityTimeout = 12500L; + } else { + sb.append(prefix); + sb.append("visibilityTimeout=[ " + visibilityTimeout + " ]"); + prefix = ", "; + } + if (minProcessingTime == null) { + minProcessingTime = 75L; + } else { + sb.append(prefix); + sb.append("minProcessingTime=[ " + minProcessingTime + " ]"); + prefix = ", "; + } + if (maxProcessingTime == null) { + maxProcessingTime = minProcessingTime; + } else { + sb.append(prefix); + sb.append("maxProcessingTime=[ " + maxProcessingTime + " ]"); + prefix = ", "; + } + if (failureRate == null) { + failureRate = 0.0; + } else { + sb.append(prefix); + sb.append("failureRate=[ " + failureRate + " ]"); + prefix = ", "; + } + String testInfo = sb.toString(); - JsonObjectBuilder job = Json.createObjectBuilder(); - if (concurrency != null) { - job.add(CONCURRENCY_KEY, concurrency * 8); - } - JsonObject consumerConfig = job.build(); - - AccessToken token = null; - String providerName = null; - ConnectionPool pool = 
null; - try { - File dbFile = File.createTempFile("sz_follow_up_", ".db"); - - providerName = dbFile.getCanonicalPath(); - - boolean usePostgreSQL = Boolean.TRUE.toString().equals( - System.getProperty("com.senzing.listener.test.postgresql")); - - Connector connector = null; - if (usePostgreSQL) { - connector = () -> { - String url = "jdbc:postgresql://localhost:5500/test"; - return DriverManager.getConnection( - url, "user", "password"); - }; - } else { - connector = new SQLiteConnector(dbFile); - } - - pool = new ConnectionPool(connector, 1); - - ConnectionProvider provider = new PoolConnectionProvider(pool); - - token = ConnectionProvider.REGISTRY.bind(providerName, provider); - - JsonObjectBuilder builder1 = Json.createObjectBuilder(); - JsonObjectBuilder builder2 = Json.createObjectBuilder(); - builder1.add(AbstractSchedulingService.CONCURRENCY_KEY, concurrency); - if (usePostgreSQL) { - builder1.add(CLEAN_DATABASE_KEY, true); - } - builder1.add(CONNECTION_PROVIDER_KEY, providerName); - builder2.add(AbstractListenerService.SCHEDULING_SERVICE_CONFIG_KEY, - builder1); - if (usePostgreSQL) { - builder2.add(AbstractListenerService.SCHEDULING_SERVICE_CLASS_KEY, - PostgreSQLSchedulingService.class.getName()); - } - - service.init(builder2.build()); - consumer.init(consumerConfig); - consumer.consume(service); - - } catch (Exception exception) { - fail(exception); - } finally { - if (token != null) { + TestMessageConsumer consumer = new TestMessageConsumer(dequeueCount, dequeueSleep, visibilityTimeout, messages); + + TestService service = new TestService(minProcessingTime, maxProcessingTime, failureRate); + + JsonObjectBuilder job = Json.createObjectBuilder(); + if (concurrency != null) { + job.add(CONCURRENCY_KEY, concurrency * 8); + } + JsonObject consumerConfig = job.build(); + + AccessToken token = null; + String providerName = null; + ConnectionPool pool = null; try { - ConnectionProvider.REGISTRY.unbind(providerName, token); + File dbFile = 
File.createTempFile("sz_follow_up_", ".db"); + + providerName = dbFile.getCanonicalPath(); + + boolean usePostgreSQL = Boolean.TRUE.toString() + .equals(System.getProperty("com.senzing.listener.test.postgresql")); + + Connector connector = null; + if (usePostgreSQL) { + connector = () -> { + String url = "jdbc:postgresql://localhost:5500/test"; + return DriverManager.getConnection(url, "user", "password"); + }; + } else { + connector = new SQLiteConnector(dbFile); + } + + pool = new ConnectionPool(connector, 1); + + ConnectionProvider provider = new PoolConnectionProvider(pool); + + token = ConnectionProvider.REGISTRY.bind(providerName, provider); + + JsonObjectBuilder builder1 = Json.createObjectBuilder(); + JsonObjectBuilder builder2 = Json.createObjectBuilder(); + builder1.add(AbstractSchedulingService.CONCURRENCY_KEY, concurrency); + if (usePostgreSQL) { + builder1.add(CLEAN_DATABASE_KEY, true); + } + builder1.add(CONNECTION_PROVIDER_KEY, providerName); + builder2.add(AbstractListenerService.SCHEDULING_SERVICE_CONFIG_KEY, builder1); + if (usePostgreSQL) { + builder2.add(AbstractListenerService.SCHEDULING_SERVICE_CLASS_KEY, + PostgreSQLSchedulingService.class.getName()); + } + + service.init(builder2.build()); + consumer.init(consumerConfig); + consumer.consume(service); - } catch (NamingException ignore) { - // do nothing + } catch (Exception exception) { + fail(exception); + } finally { + if (token != null) { + try { + ConnectionProvider.REGISTRY.unbind(providerName, token); + + } catch (NamingException ignore) { + // do nothing + } + } } - } - } - // wait success - service.awaitSuccess(consumer, messageCount, pool); - try { - Thread.sleep(2000L); - } catch (InterruptedException ignore) { - // do nothing - } - consumer.destroy(); - // Map stats = printStatistics(consumer, service); - Map stats = consumer.getStatistics(); - - Number messageRetryCount = stats.get(Stat.messageRetryCount); - Number processRetryCount = stats.get(Stat.processRetryCount); - Number 
statsFailureCount = stats.get(Stat.processFailureCount); - - if (failureRate == 0.0) { - assertEquals(consumer.getExpectedFailureCount(), statsFailureCount, - "Wrong number of info message failures"); - assertEquals(consumer.getExpectedMessageRetryCount(), - messageRetryCount, - "Wrong number of message (batch) retries"); - assertEquals(consumer.getExpectedInfoMessageRetryCount(), - processRetryCount, - "Wrong number of info message retries"); + // wait success + service.awaitSuccess(consumer, messageCount, pool); + try { + Thread.sleep(2000L); + } catch (InterruptedException ignore) { + // do nothing + } + consumer.destroy(); + // Map stats = printStatistics(consumer, service); + Map stats = consumer.getStatistics(); + + Number messageRetryCount = stats.get(Stat.messageRetryCount); + Number processRetryCount = stats.get(Stat.processRetryCount); + Number statsFailureCount = stats.get(Stat.processFailureCount); + + if (failureRate == 0.0) { + assertEquals(consumer.getExpectedFailureCount(), statsFailureCount, + "Wrong number of info message failures"); + assertEquals(consumer.getExpectedMessageRetryCount(), messageRetryCount, + "Wrong number of message (batch) retries"); + assertEquals(consumer.getExpectedInfoMessageRetryCount(), processRetryCount, + "Wrong number of info message retries"); + } + + // get the exceptions + int failureCount = service.getFailures().size(); + if (failureCount > 0) { + for (Exception e : service.getFailures()) { + System.err.println(); + System.err.println("================================================="); + System.err.println(e.getMessage()); + System.err.println(formatStackTrace(e.getStackTrace())); + } + fail("Failed with " + failureCount + " exceptions. 
" + testInfo + ", failures=[ " + service.getFailures() + + " ]"); + } + + // get the counts + Map countsMap = service.getMessageCounts(); + List countsList = new ArrayList<>(countsMap.values()); + Collections.sort(countsList); + + // destroy the service + service.destroy(); + + // check the message counts + for (Message message : messages) { + String messageBody = message.getBody(); + JsonObject jsonObject = null; + try { + jsonObject = parseJsonObject(messageBody); + } catch (Exception e) { + // bad JSON -- skip this one + continue; + } + Integer messageId = getInteger(jsonObject, "MESSAGE_ID"); + if (messageId == null) + continue; + MessageCounts counts = countsMap.get(messageId); + + if (counts == null) { + fail("Failed to find statistics for message: " + messageBody); + } + assertTrue((counts.getSuccessCount() > 0), "Message never succeeded: " + counts + " / " + messageBody); + + int maxFailures = getInteger(jsonObject, "FAILURE_COUNT", -1); + if ((maxFailures < 0 && failureRate == 0) || (maxFailures == 0)) { + assertEquals(0, counts.getFailureCount(), "Received a failure for a message where none was " + + "expected: " + counts + " / " + messageBody); + } else if (maxFailures > 0) { + assertEquals(maxFailures, counts.getFailureCount(), "Received an unexpected number of failures for " + + "a message: " + counts + " / " + messageBody); + } + } + + if (orderAfterMap != null) { + orderAfterMap.forEach((messageId, afterSet) -> { + MessageCounts msgCounts = countsMap.get(messageId); + + if (msgCounts == null) { + fail("Bad test data. Unrecognized message ID (" + messageId + ") in ordering map: " + orderAfterMap + + " / " + countsMap); + } + afterSet.forEach(afterMessageId -> { + MessageCounts afterCounts = countsMap.get(afterMessageId); + if (afterCounts == null) { + fail("Bad test data. 
Unrecognized message ID (" + afterMessageId + ") in ordering map: " + + orderAfterMap + " / " + countsMap); + } + long msgBegin = msgCounts.getLastBeginTime(); + long afterBegin = afterCounts.getLastBeginTime(); + assertTrue(msgBegin > afterBegin, + "Message " + messageId + " was unexpectedly " + "processed before message " + afterMessageId + + ": " + msgBegin + " <= " + afterBegin + " / " + + MessageCounts.toString(countsList)); + }); + }); + } } - // get the exceptions - int failureCount = service.getFailures().size(); - if (failureCount > 0) { - for (Exception e : service.getFailures()) { + private static Map printStatistics(TestMessageConsumer consumer, TestService service, ConnectionPool pool) { System.err.println(); - System.err.println("================================================="); - e.printStackTrace(); - } - fail("Failed with " + failureCount + " exceptions. " + testInfo - + ", failures=[ " + service.getFailures() + " ]"); - } + System.err.println("====================================================="); + System.err.println("MESSAGES COMPLETED: " + service.getSuccessCount()); + Map stats = consumer.getStatistics(); + + if (pool != null) { + System.err.println("POOL STATISTICS: "); + Map poolStats = pool.getStatistics(); + poolStats.forEach((statistic, value) -> { + System.err.println(" " + statistic + ": " + value + " " + statistic.getUnits()); + }); + System.err.println(); + } + System.err.println("CONSUMER STATISTICS:"); + System.err.println(" dequeueCount: " + consumer.getDequeueCount() + " messages"); + System.err.println(" dequeueSleep: " + consumer.getDequeueSleep() + " ms"); + System.err.println(" visibilityTimeout: " + consumer.getVisibilityTimeout() + " ms"); + System.err.println( + " expectedServiceProcessFailureCount: " + consumer.getExpectedFailureCount() + " info messages"); + System.err.println(" expectedMessageRetryCount: " + consumer.getExpectedMessageRetryCount() + " messages"); + System.err.println( + " 
expectedInfoMessageRetryCount: " + consumer.getExpectedInfoMessageRetryCount() + " info messages"); - // get the counts - Map countsMap = service.getMessageCounts(); - List countsList = new ArrayList<>(countsMap.values()); - Collections.sort(countsList); - - // destroy the service - service.destroy(); - - // check the message counts - for (Message message : messages) { - String messageBody = message.getBody(); - JsonObject jsonObject = null; - try { - jsonObject = parseJsonObject(messageBody); - } catch (Exception e) { - // bad JSON -- skip this one - continue; - } - Integer messageId = getInteger(jsonObject, "MESSAGE_ID"); - if (messageId == null) - continue; - MessageCounts counts = countsMap.get(messageId); - - if (counts == null) { - fail("Failed to find statistics for message: " + messageBody); - } - assertTrue((counts.getSuccessCount() > 0), - "Message never succeeded: " + counts + " / " + messageBody); - - int maxFailures = getInteger(jsonObject, "FAILURE_COUNT", -1); - if ((maxFailures < 0 && failureRate == 0) || (maxFailures == 0)) { - assertEquals(0, counts.getFailureCount(), - "Received a failure for a message where none was " - + "expected: " + counts + " / " + messageBody); - } else if (maxFailures > 0) { - assertEquals(maxFailures, counts.getFailureCount(), - "Received an unexpected number of failures for " - + "a message: " + counts + " / " + messageBody); - } - } + stats.forEach((key, value) -> { + String units = key.getUnits(); + System.out.println(" " + key + ": " + value + ((units != null) ? " " + units : "")); + }); - if (orderAfterMap != null) { - orderAfterMap.forEach((messageId, afterSet) -> { - MessageCounts msgCounts = countsMap.get(messageId); - - if (msgCounts == null) { - fail("Bad test data. 
Unrecognized message ID (" + messageId - + ") in ordering map: " + orderAfterMap + " / " - + countsMap); - } - afterSet.forEach(afterMessageId -> { - MessageCounts afterCounts = countsMap.get(afterMessageId); - if (afterCounts == null) { - fail("Bad test data. Unrecognized message ID (" + afterMessageId - + ") in ordering map: " + orderAfterMap + " / " - + countsMap); - } - long msgBegin = msgCounts.getLastBeginTime(); - long afterBegin = afterCounts.getLastBeginTime(); - assertTrue(msgBegin > afterBegin, - "Message " + messageId + " was unexpectedly " - + "processed before message " + afterMessageId + ": " - + msgBegin + " <= " + afterBegin + " / " - + MessageCounts.toString(countsList)); + System.err.println(); + System.err.println("-----------------------------------------------------"); + AbstractSchedulingService schedulingService = (AbstractSchedulingService) service.getSchedulingService(); + Map stats2 = schedulingService.getStatistics(); + System.err.println("SCHEDULING STATISTICS:"); + stats2.forEach((key, value) -> { + String units = key.getUnits(); + System.out.println(" " + key + ": " + value + ((units != null) ? 
" " + units : "")); }); - }); - } - } - - private static Map printStatistics( - TestMessageConsumer consumer, TestService service, ConnectionPool pool) { - System.err.println(); - System.err.println("====================================================="); - System.err.println("MESSAGES COMPLETED: " + service.getSuccessCount()); - Map stats = consumer.getStatistics(); - - if (pool != null) { - System.err.println("POOL STATISTICS: "); - Map poolStats = pool.getStatistics(); - poolStats.forEach((statistic, value) -> { - System.err.println( - " " + statistic + ": " + value + " " + statistic.getUnits()); - }); - System.err.println(); + + return stats; } - System.err.println("CONSUMER STATISTICS:"); - System.err.println( - " dequeueCount: " + consumer.getDequeueCount() + " messages"); - System.err.println( - " dequeueSleep: " + consumer.getDequeueSleep() + " ms"); - System.err.println( - " visibilityTimeout: " + consumer.getVisibilityTimeout() + " ms"); - System.err.println( - " expectedServiceProcessFailureCount: " - + consumer.getExpectedFailureCount() + " info messages"); - System.err.println( - " expectedMessageRetryCount: " - + consumer.getExpectedMessageRetryCount() + " messages"); - System.err.println( - " expectedInfoMessageRetryCount: " - + consumer.getExpectedInfoMessageRetryCount() + " info messages"); - - stats.forEach((key, value) -> { - String units = key.getUnits(); - System.out.println(" " + key + ": " + value - + ((units != null) ? " " + units : "")); - }); - - System.err.println(); - System.err.println("-----------------------------------------------------"); - AbstractSchedulingService schedulingService = (AbstractSchedulingService) service.getSchedulingService(); - Map stats2 = schedulingService.getStatistics(); - System.err.println("SCHEDULING STATISTICS:"); - stats2.forEach((key, value) -> { - String units = key.getUnits(); - System.out.println(" " + key + ": " + value - + ((units != null) ? " " + units : "")); - }); - - return stats; - } }