步骤 8:导出并验证分类账中的日记账数据 - 亚马逊 Quantum Ledger 数据库(亚马逊QLDB)

本文属于机器翻译版本。若本译文内容与英语原文存在差异,则一律以英文原文为准。

步骤 8:导出并验证分类账中的日记账数据

重要

终止支持通知:现有客户可以继续使用 Amazon QLDB,直到 2025 年 7 月 31 日终止支持为止。有关更多详细信息,请参阅将 Amazon QLDB 账本迁移到 Amazon Aurora PostgreSQL。

在 Amazon QLDB 中,您可以出于各种目的访问账本中的日记账内容,例如数据保留、分析和审计。有关更多信息,请参阅从 Amazon QLDB 导出日记账数据。

在此步骤中,您将 vehicle-registration 分类账中的日记账数据块导出至 Amazon S3 存储桶中。然后,您可使用导出的数据验证日记账数据块之间的哈希链,以及每个区块中的各个哈希组件。

您使用的 AWS Identity and Access Management (IAM) 委托人实体必须具有足够的 IAM 权限,才能在您的 AWS 账户中创建 Amazon S3 存储桶。有关更多信息,请参阅《Amazon S3 用户指南》中的 Amazon S3 中的策略和权限(Policies and Permissions in Amazon S3)。您还必须有权创建附带权限策略的 IAM 角色,该策略允许 QLDB 将对象写入您的 Amazon S3 存储桶。要了解更多信息,请参阅《IAM 用户指南》中的访问 IAM 资源所需的权限。

导出和验证日记账数据
  1. 查看以下文件(JournalBlock.java),该文件代表日记账数据块及其数据内容。它包括一个名为verifyBlockHash()的方法,该方法演示了如何计算区块哈希的每个组成部分。

    /* * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: MIT-0 * * Permission is hereby granted, free of charge, to any person obtaining a copy of this * software and associated documentation files (the "Software"), to deal in the Software * without restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package software.amazon.qldb.tutorial.qldb; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.fasterxml.jackson.dataformat.ion.IonTimestampSerializers; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.qldb.tutorial.Constants; import software.amazon.qldb.tutorial.Verifier; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Date; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import static java.nio.ByteBuffer.wrap; /** * Represents a JournalBlock that was recorded after executing a transaction * in the ledger. 
 */
public final class JournalBlock {
    private static final Logger log = LoggerFactory.getLogger(JournalBlock.class);

    // Location of this block within the journal (set by QLDB with the export).
    private BlockAddress blockAddress;
    // ID of the transaction that produced this block.
    private String transactionId;
    @JsonSerialize(using = IonTimestampSerializers.IonTimestampJavaDateSerializer.class)
    private Date blockTimestamp;
    // Hash published by QLDB for this block; recomputed and checked in verifyBlockHash().
    private byte[] blockHash;
    // Merkle root of entriesHashList; recomputed and checked in verifyBlockHash().
    private byte[] entriesHash;
    // Hash of the preceding journal block; chained into blockHash via Verifier.dot().
    private byte[] previousBlockHash;
    // Leaf hashes covering transactionInfo, redactionInfo, revisions, and internal system entries.
    private byte[][] entriesHashList;
    private TransactionInfo transactionInfo;
    private RedactionInfo redactionInfo;
    private List<QldbRevision> revisions;

    /**
     * Creates a JournalBlock from the fields deserialized out of an exported journal block.
     */
    @JsonCreator
    public JournalBlock(@JsonProperty("blockAddress") final BlockAddress blockAddress,
                        @JsonProperty("transactionId") final String transactionId,
                        @JsonProperty("blockTimestamp") final Date blockTimestamp,
                        @JsonProperty("blockHash") final byte[] blockHash,
                        @JsonProperty("entriesHash") final byte[] entriesHash,
                        @JsonProperty("previousBlockHash") final byte[] previousBlockHash,
                        @JsonProperty("entriesHashList") final byte[][] entriesHashList,
                        @JsonProperty("transactionInfo") final TransactionInfo transactionInfo,
                        @JsonProperty("redactionInfo") final RedactionInfo redactionInfo,
                        @JsonProperty("revisions") final List<QldbRevision> revisions) {
        this.blockAddress = blockAddress;
        this.transactionId = transactionId;
        this.blockTimestamp = blockTimestamp;
        this.blockHash = blockHash;
        this.entriesHash = entriesHash;
        this.previousBlockHash = previousBlockHash;
        this.entriesHashList = entriesHashList;
        this.transactionInfo = transactionInfo;
        this.redactionInfo = redactionInfo;
        this.revisions = revisions;
    }

    // NOTE(review): the byte[] getters below return the internal arrays directly
    // (no defensive copy); callers must not mutate the returned arrays.

    public BlockAddress getBlockAddress() {
        return blockAddress;
    }

    public String getTransactionId() {
        return transactionId;
    }

    public Date getBlockTimestamp() {
        return blockTimestamp;
    }

    public byte[][] getEntriesHashList() {
        return entriesHashList;
    }

    public TransactionInfo getTransactionInfo() {
        return transactionInfo;
    }

    public RedactionInfo getRedactionInfo() {
        return redactionInfo;
    }

    public List<QldbRevision> getRevisions() {
        return revisions;
    }

    public byte[] getEntriesHash() {
        return entriesHash;
    }

    public byte[] getBlockHash() {
        return blockHash;
    }

    public byte[] getPreviousBlockHash() {
        return previousBlockHash;
    }

    @Override
    public String toString() {
        // NOTE(review): entriesHashList is a byte[][]; Arrays.toString prints the inner
        // arrays as object references rather than their contents (deepToString would not).
        return "JournalBlock{"
                + "blockAddress=" + blockAddress
                + ", transactionId='" + transactionId + '\''
                + ", blockTimestamp=" + blockTimestamp
                + ", blockHash=" + Arrays.toString(blockHash)
                + ", entriesHash=" + Arrays.toString(entriesHash)
                + ", previousBlockHash=" + Arrays.toString(previousBlockHash)
                + ", entriesHashList=" + Arrays.toString(entriesHashList)
                + ", transactionInfo=" + transactionInfo
                + ", redactionInfo=" + redactionInfo
                + ", revisions=" + revisions
                + '}';
    }

    /**
     * Field-by-field equality. Array fields are compared by content
     * (Arrays.equals / Arrays.deepEquals); redactionInfo and revisions are null-safe.
     */
    @Override
    public boolean equals(final Object o) {
        if (this == o) {
            return true;
        }
        if (!(o instanceof JournalBlock)) {
            return false;
        }
        final JournalBlock that = (JournalBlock) o;
        if (!getBlockAddress().equals(that.getBlockAddress())) {
            return false;
        }
        if (!getTransactionId().equals(that.getTransactionId())) {
            return false;
        }
        if (!getBlockTimestamp().equals(that.getBlockTimestamp())) {
            return false;
        }
        if (!Arrays.equals(getBlockHash(), that.getBlockHash())) {
            return false;
        }
        if (!Arrays.equals(getEntriesHash(), that.getEntriesHash())) {
            return false;
        }
        if (!Arrays.equals(getPreviousBlockHash(), that.getPreviousBlockHash())) {
            return false;
        }
        if (!Arrays.deepEquals(getEntriesHashList(), that.getEntriesHashList())) {
            return false;
        }
        if (!getTransactionInfo().equals(that.getTransactionInfo())) {
            return false;
        }
        if (getRedactionInfo() != null
                ? !getRedactionInfo().equals(that.getRedactionInfo()) : that.getRedactionInfo() != null) {
            return false;
        }
        return getRevisions() != null ? getRevisions().equals(that.getRevisions()) : that.getRevisions() == null;
    }

    /**
     * Hash consistent with equals(): array fields use Arrays.hashCode / deepHashCode,
     * nullable fields contribute 0 when null.
     */
    @Override
    public int hashCode() {
        int result = getBlockAddress().hashCode();
        result = 31 * result + getTransactionId().hashCode();
        result = 31 * result + getBlockTimestamp().hashCode();
        result = 31 * result + Arrays.hashCode(getBlockHash());
        result = 31 * result + Arrays.hashCode(getEntriesHash());
        result = 31 * result + Arrays.hashCode(getPreviousBlockHash());
        result = 31 * result + Arrays.deepHashCode(getEntriesHashList());
        result = 31 * result + getTransactionInfo().hashCode();
        result = 31 * result + (getRedactionInfo() != null ? getRedactionInfo().hashCode() : 0);
        result = 31 * result + (getRevisions() != null ? getRevisions().hashCode() : 0);
        return result;
    }

    /**
     * This method validates that the hashes of the components of a journal block make up the block
     * hash that is provided with the block itself.
     *
     * The components that contribute to the hash of the journal block consist of the following:
     *   - user transaction information (contained in [transactionInfo])
     *   - user redaction information (contained in [redactionInfo])
     *   - user revisions (contained in [revisions])
     *   - hashes of internal-only system metadata (contained in [revisions] and in [entriesHashList])
     *   - the previous block hash
     *
     * If any of the computed hashes of user information cannot be validated or any of the system
     * hashes do not result in the correct computed values, this method throws an IllegalArgumentException.
     *
     * Internal-only system metadata is represented by its hash, and can be present in the form of certain
     * items in the [revisions] list that only contain a hash and no user data, as well as some hashes
     * in [entriesHashList].
     *
     * To validate that the hashes of the user data are valid components of the [blockHash], this method
     * performs the following steps:
     *
     * 1. Compute the hash of the [transactionInfo] and validate that it is included in the [entriesHashList].
     * 2. Compute the hash of the [redactionInfo], if present, and validate that it is included in the
     *    [entriesHashList].
     * 3. Validate the hash of each user revision was correctly computed and matches the hash published
     *    with that revision.
     * 4. Compute the hash of the [revisions] by treating the revision hashes as the leaf nodes of a Merkle tree
     *    and calculating the root hash of that tree. Then validate that hash is included in the [entriesHashList].
     * 5. Compute the hash of the [entriesHashList] by treating the hashes as the leaf nodes of a Merkle tree
     *    and calculating the root hash of that tree. Then validate that hash matches [entriesHash].
     * 6. Finally, compute the block hash by computing the hash resulting from concatenating the [entriesHash]
     *    and previous block hash, and validate that the result matches the [blockHash] provided by QLDB
     *    with the block.
     *
     * This method is called by ValidateQldbHashChain::verify for each journal block to validate its
     * contents before verifying that the hash chain between consecutive blocks is correct.
     *
     * @throws IllegalArgumentException if any component hash fails validation.
     */
    public void verifyBlockHash() {
        // Wrap each published entry hash in a read-only ByteBuffer so content-based
        // set membership checks work (raw byte[] uses identity equality).
        Set<ByteBuffer> entriesHashSet = new HashSet<>();
        Arrays.stream(entriesHashList).forEach(hash -> entriesHashSet.add(wrap(hash).asReadOnlyBuffer()));

        // Step 1: transactionInfo hash must be one of the published entry hashes.
        byte[] computedTransactionInfoHash = computeTransactionInfoHash();
        if (!entriesHashSet.contains(wrap(computedTransactionInfoHash).asReadOnlyBuffer())) {
            throw new IllegalArgumentException(
                    "Block transactionInfo hash is not contained in the QLDB block entries hash list.");
        }

        // Step 2: redactionInfo hash (only present on blocks with redactions).
        if (redactionInfo != null) {
            byte[] computedRedactionInfoHash = computeRedactionInfoHash();
            if (!entriesHashSet.contains(wrap(computedRedactionInfoHash).asReadOnlyBuffer())) {
                throw new IllegalArgumentException(
                        "Block redactionInfo hash is not contained in the QLDB block entries hash list.");
            }
        }

        // Steps 3-4: verify each revision's own hash, then the Merkle root of all revision hashes.
        if (revisions != null) {
            revisions.forEach(QldbRevision::verifyRevisionHash);
            byte[] computedRevisionsHash = computeRevisionsHash();
            if (!entriesHashSet.contains(wrap(computedRevisionsHash).asReadOnlyBuffer())) {
                throw new IllegalArgumentException(
                        "Block revisions list hash is not contained in the QLDB block entries hash list.");
            }
        }

        // Step 5: Merkle root of the entry hashes must equal the published entriesHash.
        byte[] computedEntriesHash = computeEntriesHash();
        if (!Arrays.equals(computedEntriesHash, entriesHash)) {
            throw new IllegalArgumentException("Computed entries hash does not match entries hash provided in the block.");
        }

        // Step 6: chain entriesHash with the previous block hash to reproduce blockHash.
        byte[] computedBlockHash = Verifier.dot(computedEntriesHash, previousBlockHash);
        if (!Arrays.equals(computedBlockHash, blockHash)) {
            throw new IllegalArgumentException("Computed block hash does not match block hash provided in the block.");
        }
    }

    // Hash of the transactionInfo entry, computed over its Ion serialization.
    private byte[] computeTransactionInfoHash() {
        try {
            return QldbIonUtils.hashIonValue(Constants.MAPPER.writeValueAsIonValue(transactionInfo));
        } catch (IOException e) {
            throw new IllegalArgumentException("Could not compute transactionInfo hash to verify block hash.", e);
        }
    }

    // Hash of the redactionInfo entry, computed over its Ion serialization.
    private byte[] computeRedactionInfoHash() {
        try {
            return QldbIonUtils.hashIonValue(Constants.MAPPER.writeValueAsIonValue(redactionInfo));
        } catch (IOException e) {
            throw new IllegalArgumentException("Could not compute redactionInfo hash to verify block hash.", e);
        }
    }

    // Merkle root over the per-revision hashes (revision hashes are the leaves).
    private byte[] computeRevisionsHash() {
        return Verifier.calculateMerkleTreeRootHash(revisions.stream().map(QldbRevision::getHash).collect(Collectors.toList()));
    }

    // Merkle root over all entry hashes (entriesHashList entries are the leaves).
    private byte[] computeEntriesHash() {
        return Verifier.calculateMerkleTreeRootHash(Arrays.asList(entriesHashList));
    }
}
  2. 编译并运行以下程序(ValidateQldbHashChain.java),以执行以下步骤:

    1. 将 vehicle-registration 账本中的日记账数据块导出到名为 qldb-tutorial-journal-export-111122223333(请将 111122223333 替换为您的 AWS 账户 ID)的 Amazon S3 存储桶中。

    2. 通过调用verifyBlockHash()来验证每个区块中的各个哈希组件。

    3. 验证日记账数据块之间的哈希链。

    /* * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: MIT-0 * * Permission is hereby granted, free of charge, to any person obtaining a copy of this * software and associated documentation files (the "Software"), to deal in the Software * without restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, * INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A * PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package software.amazon.qldb.tutorial; import com.amazonaws.services.qldb.model.ExportJournalToS3Result; import com.amazonaws.services.qldb.model.S3EncryptionConfiguration; import com.amazonaws.services.qldb.model.S3ObjectEncryptionType; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import java.time.Instant; import java.util.Arrays; import java.util.List; import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import com.amazonaws.services.securitytoken.model.GetCallerIdentityRequest; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import software.amazon.qldb.tutorial.qldb.JournalBlock; /** * Validate the hash chain of a QLDB ledger by stepping through its S3 export. * * This code accepts an exportId as an argument, if exportId is passed the code * will use that or request QLDB to generate a new export to perform QLDB hash * chain validation. 
* * This code expects that you have AWS credentials setup per: * http://docs.aws.amazon.com/java-sdk/latest/developer-guide/setup-credentials.html */ public final class ValidateQldbHashChain { public static final Logger log = LoggerFactory.getLogger(ValidateQldbHashChain.class); private static final int TIME_SKEW = 20; private ValidateQldbHashChain() { } /** * Export journal contents to a S3 bucket. * * @return the ExportId of the journal export. * @throws InterruptedException if the thread is interrupted while waiting for export to complete. */ private static String createExport() throws InterruptedException { String accountId = AWSSecurityTokenServiceClientBuilder.defaultClient() .getCallerIdentity(new GetCallerIdentityRequest()).getAccount(); String bucketName = Constants.JOURNAL_EXPORT_S3_BUCKET_NAME_PREFIX + "-" + accountId; String prefix = Constants.LEDGER_NAME + "-" + Instant.now().getEpochSecond() + "/"; S3EncryptionConfiguration encryptionConfiguration = new S3EncryptionConfiguration() .withObjectEncryptionType(S3ObjectEncryptionType.SSE_S3); ExportJournalToS3Result exportJournalToS3Result = ExportJournal.createJournalExportAndAwaitCompletion(Constants.LEDGER_NAME, bucketName, prefix, null, encryptionConfiguration, ExportJournal.DEFAULT_EXPORT_TIMEOUT_MS); return exportJournalToS3Result.getExportId(); } /** * Validates that the chain hash on the {@link JournalBlock} is valid. * * @param journalBlocks * {@link JournalBlock} containing hashes to validate. * @throws IllegalStateException if previous block hash does not match. 
*/ public static void verify(final List<JournalBlock> journalBlocks) { if (journalBlocks.size() == 0) { return; } journalBlocks.stream().reduce(null, (previousJournalBlock, journalBlock) -> { journalBlock.verifyBlockHash(); if (previousJournalBlock == null) { return journalBlock; } if (!Arrays.equals(previousJournalBlock.getBlockHash(), journalBlock.getPreviousBlockHash())) { throw new IllegalStateException("Previous block hash doesn't match."); } byte[] blockHash = Verifier.dot(journalBlock.getEntriesHash(), previousJournalBlock.getBlockHash()); if (!Arrays.equals(blockHash, journalBlock.getBlockHash())) { throw new IllegalStateException("Block hash doesn't match entriesHash dot previousBlockHash, the chain is " + "broken."); } return journalBlock; }); } public static void main(final String... args) throws InterruptedException { try { String exportId; if (args.length == 1) { exportId = args[0]; log.info("Validating QLDB hash chain for exportId: " + exportId); } else { log.info("Requesting QLDB to create an export."); exportId = createExport(); } List<JournalBlock> journalBlocks = JournalS3ExportReader.readExport(DescribeJournalExport.describeExport(Constants.LEDGER_NAME, exportId), AmazonS3ClientBuilder.defaultClient()); verify(journalBlocks); } catch (Exception e) { log.error("Unable to perform hash chain verification.", e); throw e; } } }

如果您不再需要使用vehicle-registration分类账,请继续步骤 9:(可选)清除资源