-
Notifications
You must be signed in to change notification settings - Fork 8.8k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
This commit introduces an in-memory cache for the block storage of the ledger. It caches new blocks that are committed and assumes blocks are committed in-order and with consecutive sequences. The block iterators now attempt to retrieve the blocks from the cache if possible before going to the block storage. The intent is twofold: 1) Speedup the block Deliver API by not doing disk I/O when clients (peers, orderers) fetch blocks. 2) Reduce the impact of the deliver API from writing new blocks into the ledger. Signed-off-by: Yacov Manevich <yacov.manevich@ibm.com>
- Loading branch information
1 parent
a8e078f
commit 45cefb2
Showing
4 changed files
with
303 additions
and
2 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,105 @@ | ||
/* | ||
Copyright IBM Corp. All Rights Reserved. | ||
SPDX-License-Identifier: Apache-2.0 | ||
*/ | ||
|
||
package blkstorage | ||
|
||
import ( | ||
"fmt" | ||
"sync" | ||
|
||
"github.com/hyperledger/fabric-protos-go/common" | ||
) | ||
|
||
// estimatedBlockSize is a rough guess at the serialized size of an average
// block; it is used only to pre-size the cache map in newCache.
const estimatedBlockSize = 512 * 1024
|
||
// cache is a bounded in-memory store of recently committed blocks, keyed by
// block sequence number. Blocks are expected to be inserted in order with
// consecutive sequences, so the cached entries always form a contiguous
// range ending at maxSeq.
type cache struct {
	cacheLock    sync.RWMutex           // guards cache and sizeBytes
	disabled     bool                   // set when maxSizeBytes == 0; makes get/put no-ops
	cache        map[uint64]cachedBlock // sequence number -> cached block
	sizeBytes    int                    // running total of blockSize values currently cached
	maxSeq       uint64                 // highest sequence number ever inserted via put
	maxSizeBytes int                    // size budget; eviction runs while sizeBytes exceeds it
	// NOTE(review): maxSeq is read and written by put outside cacheLock —
	// this presumes a single committing writer; confirm against callers.
}
|
||
// cachedBlock pairs a cached block with the size (in bytes) its caller
// reported at insertion time, so eviction can maintain the byte total
// without re-measuring the block.
type cachedBlock struct {
	block     *common.Block
	blockSize int
}
|
||
func newCache(maxSizeBytes int) *cache { | ||
isCacheDisabled := maxSizeBytes == 0 | ||
|
||
return &cache{ | ||
disabled: isCacheDisabled, | ||
cache: make(map[uint64]cachedBlock, maxSizeBytes/estimatedBlockSize), | ||
maxSizeBytes: maxSizeBytes, | ||
} | ||
} | ||
|
||
func (c *cache) get(seq uint64) (*common.Block, bool) { | ||
if c.disabled { | ||
return nil, false | ||
} | ||
|
||
c.cacheLock.RLock() | ||
defer c.cacheLock.RUnlock() | ||
|
||
cachedBlock, exists := c.cache[seq] | ||
return cachedBlock.block, exists | ||
} | ||
|
||
func (c *cache) put(block *common.Block, blockSize int) { | ||
if c.disabled { | ||
return | ||
} | ||
|
||
seq := block.Header.Number | ||
|
||
if c.maxSeq > seq { | ||
return | ||
} | ||
|
||
if c.maxSeq+1 < seq && c.maxSeq != 0 { | ||
panic(fmt.Sprintf("detected out of order block insertion: attempted to insert block number %d but highest block is %d", | ||
seq, c.maxSeq)) | ||
} | ||
|
||
if c.maxSeq == seq && c.maxSeq != 0 { | ||
panic(fmt.Sprintf("detected insertion of the same block (%d) twice", seq)) | ||
} | ||
|
||
// Insert the block to the cache | ||
c.maxSeq = seq | ||
|
||
c.cacheLock.Lock() | ||
defer c.cacheLock.Unlock() | ||
|
||
c.sizeBytes += blockSize | ||
|
||
c.cache[seq] = cachedBlock{block: block, blockSize: blockSize} | ||
|
||
// If our cache is too big, evict the oldest block | ||
for c.sizeBytes > c.maxSizeBytes { | ||
c.evictOldestCachedBlock() | ||
} | ||
} | ||
|
||
func (c *cache) evictOldestCachedBlock() { | ||
cachedItemCount := len(c.cache) | ||
|
||
// Given a series of k > 0 consecutive elements: {i, i+1, i+2, ... , i+k-1} | ||
// If the max sequence is j then j=i+k-1, and then the lowest element i is j-k+1 | ||
evictedIndex := c.maxSeq - uint64(cachedItemCount) + 1 | ||
evictedBlock, exists := c.cache[evictedIndex] | ||
if !exists { | ||
panic(fmt.Sprintf("programming error: last stored block sequence is %d and cached block count"+ | ||
" is %d but index to be evicted %d was not found", c.maxSeq, cachedItemCount, evictedIndex)) | ||
} | ||
delete(c.cache, evictedIndex) // Delete minimum entry | ||
c.sizeBytes -= evictedBlock.blockSize | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,171 @@ | ||
/* | ||
Copyright IBM Corp. All Rights Reserved. | ||
SPDX-License-Identifier: Apache-2.0 | ||
*/ | ||
|
||
package blkstorage | ||
|
||
import ( | ||
"testing" | ||
|
||
"github.com/stretchr/testify/assert" | ||
|
||
"github.com/hyperledger/fabric-protos-go/common" | ||
"github.com/stretchr/testify/require" | ||
) | ||
|
||
func TestCacheDisabled(t *testing.T) { | ||
c := newCache(0) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{}}, 0) | ||
block, exists := c.get(0) | ||
|
||
assertNotCached(t, exists, block) | ||
} | ||
|
||
func TestNotCachingTooSmallEntry(t *testing.T) { | ||
c := newCache(10) | ||
|
||
for i := 100; i < 105; i++ { | ||
block := &common.Block{ | ||
Header: &common.BlockHeader{Number: uint64(i)}, | ||
} | ||
c.put(block, 1) | ||
} | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 99}}, 1) | ||
block, exists := c.get(99) | ||
|
||
assertNotCached(t, exists, block) | ||
} | ||
|
||
func TestTooBigEntryNotCached(t *testing.T) { | ||
c := newCache(10) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 100}}, 11) | ||
block, exists := c.get(100) | ||
|
||
assertNotCached(t, exists, block) | ||
} | ||
|
||
func TestOutOfOrderInsertionPanics(t *testing.T) { | ||
c := newCache(10) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 100}}, 1) | ||
block, exists := c.get(100) | ||
|
||
assertCached(t, exists, block, 100) | ||
|
||
func() { | ||
defer func() { | ||
err := recover() | ||
assert.Contains(t, err.(string), "detected out of order block insertion: attempted to insert block number 102 but highest block is 100") | ||
}() | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 102}}, 1) | ||
}() | ||
} | ||
|
||
func TestDoubleInsertionPanics(t *testing.T) { | ||
c := newCache(10) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 100}}, 1) | ||
block, exists := c.get(100) | ||
|
||
assertCached(t, exists, block, 100) | ||
|
||
func() { | ||
defer func() { | ||
err := recover() | ||
assert.Contains(t, err.(string), "detected insertion of the same block (100) twice") | ||
}() | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 100}}, 1) | ||
}() | ||
} | ||
|
||
func TestTooBigEntryEvictsSmallerEntries(t *testing.T) { | ||
c := newCache(10) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 1}}, 1) | ||
c.put(&common.Block{Header: &common.BlockHeader{Number: 2}}, 1) | ||
c.put(&common.Block{Header: &common.BlockHeader{Number: 3}}, 1) | ||
|
||
block, exists := c.get(1) | ||
assertCached(t, exists, block, 1) | ||
|
||
block, exists = c.get(2) | ||
assertCached(t, exists, block, 2) | ||
|
||
block, exists = c.get(3) | ||
assertCached(t, exists, block, 3) | ||
|
||
c.put(&common.Block{Header: &common.BlockHeader{Number: 4}}, 10) | ||
|
||
block, exists = c.get(1) | ||
assertNotCached(t, exists, block) | ||
|
||
block, exists = c.get(2) | ||
assertNotCached(t, exists, block) | ||
|
||
block, exists = c.get(3) | ||
assertNotCached(t, exists, block) | ||
|
||
block, exists = c.get(4) | ||
assertCached(t, exists, block, 4) | ||
} | ||
|
||
func TestCacheEviction(t *testing.T) { | ||
c := newCache(10) | ||
|
||
for i := 0; i < 10; i++ { | ||
block := &common.Block{ | ||
Header: &common.BlockHeader{Number: uint64(i)}, | ||
} | ||
c.put(block, 1) | ||
} | ||
|
||
for i := 10; i < 20; i++ { | ||
// Ensure items 11 blocks in the past are not cached, but evicted | ||
if uint64(i) > 10 { | ||
block, exists := c.get(uint64(i) - 11) | ||
assertNotCached(t, exists, block) | ||
} | ||
// Ensure items 10 blocks in the past are still cached | ||
block, exists := c.get(uint64(i) - 10) | ||
assertCached(t, exists, block, uint64(i)-10) | ||
|
||
block = &common.Block{ | ||
Header: &common.BlockHeader{Number: uint64(i)}, | ||
} | ||
c.put(block, 1) | ||
} | ||
|
||
block, exists := c.get(9) | ||
assertNotCached(t, exists, block) | ||
|
||
for i := 10; i < 20; i++ { | ||
block, exists := c.get(uint64(i)) | ||
assertCached(t, exists, block, uint64(i)) | ||
} | ||
} | ||
|
||
// assertNotCached asserts that a get call missed: exists is false and the
// returned block is nil.
func assertNotCached(t *testing.T, exists bool, block *common.Block) {
	assertWasCached(t, exists, block, 0, false)
}
|
||
// assertCached asserts that a get call hit: exists is true and the returned
// block carries the expected sequence number.
func assertCached(t *testing.T, exists bool, block *common.Block, expectedSeq uint64) {
	assertWasCached(t, exists, block, expectedSeq, true)
}
|
||
func assertWasCached(t *testing.T, exists bool, block *common.Block, expectedSeq uint64, expectedExists bool) { | ||
if !expectedExists { | ||
require.False(t, exists) | ||
require.Nil(t, block) | ||
return | ||
} | ||
require.True(t, exists) | ||
require.NotNil(t, block) | ||
require.Equal(t, expectedSeq, block.Header.Number) | ||
} |