Commit ffa6cd8
remove the original form of block compression, now that the hard fork has activated and we serialize CLVM in a more efficient way
arvidn committed Jun 20, 2024
1 parent e87d51a commit ffa6cd8
Showing 11 changed files with 19 additions and 291 deletions.
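The "more efficient way" in the commit message is CLVM back-reference serialization: instead of reusing a byte range from an earlier block's generator as a compression template, repeated subtrees are deduplicated when the generator itself is serialized. A minimal sketch of the two encodings, using the two helpers kept in chia/full_node/bundle_tools.py below; the .program attribute and the size comparison are illustrative assumptions, not code from this commit:

from typing import Tuple

from chia.full_node.bundle_tools import (
    simple_solution_generator,
    simple_solution_generator_backrefs,
)
from chia.types.spend_bundle import SpendBundle


def generator_sizes(bundle: SpendBundle) -> Tuple[int, int]:
    # pre-hard-fork encoding: every puzzle reveal is serialized in full
    plain = simple_solution_generator(bundle)
    # post-hard-fork encoding: repeated CLVM subtrees become back-references
    packed = simple_solution_generator_backrefs(bundle)
    return len(bytes(plain.program)), len(bytes(packed.program))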
10 changes: 1 addition & 9 deletions chia/_tests/blockchain/test_blockchain.py
@@ -30,7 +30,6 @@
from chia.consensus.full_block_to_block_record import block_to_block_record
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.consensus.pot_iterations import is_overflow_block
from chia.full_node.bundle_tools import detect_potential_template_generator
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.simulator.block_tools import BlockTools, create_block_tools_async
from chia.simulator.keyring import TempKeyring
@@ -2553,20 +2552,13 @@ async def test_invalid_transactions_ref_list(
)
await _validate_and_add_block(b, blocks[-1])
assert blocks[-1].transactions_generator is not None
generator_arg = detect_potential_template_generator(blocks[-1].height, blocks[-1].transactions_generator)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# once the hard fork activates, we don't use this form of block
# compression anymore
assert generator_arg is None
else:
assert generator_arg is not None

blocks = bt.get_consecutive_blocks(
1,
block_list_input=blocks,
guarantee_transaction_block=True,
transaction_data=tx,
previous_generator=generator_arg,
previous_generator=[blocks[-1].height],
)
block = blocks[-1]
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
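With the template path gone, this test drives cross-block generator references by height alone. A hedged sketch of what the updated call expresses, assuming the bt, blocks and tx fixtures from the surrounding test; the final check is illustrative and not part of the original test:

blocks = bt.get_consecutive_blocks(
    1,
    block_list_input=blocks,
    guarantee_transaction_block=True,
    transaction_data=tx,
    previous_generator=[blocks[-1].height],  # heights of earlier generators, not a CompressorArg
)
new_block = blocks[-1]
# any referenced heights point at earlier blocks whose generators this block reuses
for height in new_block.transactions_generator_ref_list:
    assert height < new_block.height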
29 changes: 0 additions & 29 deletions chia/_tests/core/full_node/test_full_node.py
@@ -24,7 +24,6 @@
from chia._tests.util.time_out_assert import time_out_assert, time_out_assert_custom_interval, time_out_messages
from chia.consensus.block_body_validation import ForkInfo
from chia.consensus.pot_iterations import is_overflow_block
from chia.full_node.bundle_tools import detect_potential_template_generator
from chia.full_node.full_node import WalletUpdate
from chia.full_node.full_node_api import FullNodeAPI
from chia.full_node.signage_point import SignagePoint
@@ -217,13 +216,6 @@ async def check_transaction_confirmed(transaction) -> bool:
# Confirm generator is not compressed
program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
assert program is not None
template = detect_potential_template_generator(uint32(5), program)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# after the hard fork we don't use this compression mechanism
# anymore, we use CLVM backrefs in the encoding instead
assert template is None
else:
assert template is not None
assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) == 0

# Send another tx
@@ -252,7 +244,6 @@ async def check_transaction_confirmed(transaction) -> bool:
# Confirm generator is compressed
program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
assert program is not None
assert detect_potential_template_generator(uint32(6), program) is None
num_blocks = len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# after the hard fork we don't use this compression mechanism
@@ -333,7 +324,6 @@ async def check_transaction_confirmed(transaction) -> bool:
# Confirm generator is compressed
program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
assert program is not None
assert detect_potential_template_generator(uint32(9), program) is None
num_blocks = len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# after the hard fork we don't use this compression mechanism
@@ -429,13 +419,6 @@ async def check_transaction_confirmed(transaction) -> bool:
# Confirm generator is not compressed
program: Optional[SerializedProgram] = (await full_node_1.get_all_full_blocks())[-1].transactions_generator
assert program is not None
template = detect_potential_template_generator(uint32(11), program)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# after the hard fork we don't use this compression mechanism
# anymore, we use CLVM backrefs in the encoding instead
assert template is None
else:
assert template is not None
assert len((await full_node_1.get_all_full_blocks())[-1].transactions_generator_ref_list) == 0

height = full_node_1.full_node.blockchain.get_peak().height
@@ -444,13 +427,6 @@ async def check_transaction_confirmed(transaction) -> bool:
all_blocks: List[FullBlock] = await full_node_1.get_all_full_blocks()
assert height == len(all_blocks) - 1

template = full_node_1.full_node.full_node_store.previous_generator
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
# after the hard fork we don't use this compression mechanism
# anymore, we use CLVM backrefs in the encoding instead
assert template is None
else:
assert template is not None
if test_reorgs:
reog_blocks = bt.get_consecutive_blocks(14)
for r in range(0, len(reog_blocks), 3):
@@ -477,11 +453,6 @@ async def check_transaction_confirmed(transaction) -> bool:
for result in results:
assert result.error is None

# Test revert previous_generator
for block in reog_blocks:
await full_node_1.full_node.add_block(block)
assert full_node_1.full_node.full_node_store.previous_generator is None


class TestFullNodeProtocol:
@pytest.mark.anyio
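The deleted assertions above inspected detect_potential_template_generator and full_node_store.previous_generator, both of which this commit removes. The check that stays meaningful is the block's transactions_generator_ref_list; a hedged sketch, assuming the full_node_1 and consensus_mode fixtures of this test and that it runs inside the async test body:

blocks = await full_node_1.get_all_full_blocks()
last = blocks[-1]
assert last.transactions_generator is not None
num_refs = len(last.transactions_generator_ref_list)
if consensus_mode >= ConsensusMode.HARD_FORK_2_0:
    # with backref serialization no cross-block generator templates are expected
    assert num_refs == 0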
109 changes: 1 addition & 108 deletions chia/_tests/generator/test_compression.py
@@ -14,9 +14,7 @@
from chia._tests.core.make_block_generator import make_spend_bundle
from chia._tests.generator.test_rom import run_generator
from chia.full_node.bundle_tools import (
bundle_suitable_for_compression,
compressed_coin_spend_entry_list,
compressed_spend_bundle_solution,
match_standard_transaction_at_any_index,
simple_solution_generator,
simple_solution_generator_backrefs,
@@ -25,7 +23,7 @@
from chia.simulator.block_tools import test_constants
from chia.types.blockchain_format.program import INFINITE_COST, Program
from chia.types.blockchain_format.serialized_program import SerializedProgram
from chia.types.generator_types import BlockGenerator, CompressorArg
from chia.types.generator_types import BlockGenerator
from chia.types.spend_bundle import SpendBundle
from chia.util.byte_types import hexstr_to_bytes
from chia.util.ints import uint32
@@ -65,39 +63,6 @@
assert serialized_length(gen2) == len(gen2)


@dataclass(frozen=True)
class MultipleCompressorArg:
arg: List[CompressorArg]
split_offset: int


def create_multiple_ref_generator(args: MultipleCompressorArg, spend_bundle: SpendBundle) -> BlockGenerator:
"""
Decompress a transaction by referencing bytes from multiple input generator references
"""
compressed_cse_list = compressed_coin_spend_entry_list(spend_bundle)
program = TEST_MULTIPLE.curry(
DECOMPRESS_PUZZLE,
DECOMPRESS_CSE_WITH_PREFIX,
args.arg[0].start,
args.arg[0].end - args.split_offset,
args.arg[1].end - args.split_offset,
args.arg[1].end,
compressed_cse_list,
)

# TODO aqk: Improve ergonomics of CompressorArg -> GeneratorArg conversion
generator_list = [
args.arg[0].generator,
args.arg[1].generator,
]
generator_heights = [
FAKE_BLOCK_HEIGHT1,
FAKE_BLOCK_HEIGHT2,
]
return BlockGenerator(SerializedProgram.from_program(program), generator_list, generator_heights)


def spend_bundle_to_coin_spend_entry_list(bundle: SpendBundle) -> List[Any]:
r = []
for coin_spend in bundle.coin_spends:
@@ -112,81 +77,9 @@ def spend_bundle_to_coin_spend_entry_list(bundle: SpendBundle) -> List[Any]:


class TestCompression:
def test_spend_bundle_suitable(self) -> None:
sb: SpendBundle = make_spend_bundle(1)
assert bundle_suitable_for_compression(sb)

def test_compress_spend_bundle(self) -> None:
pass

def test_multiple_input_gen_refs(self) -> None:
match = match_standard_transaction_at_any_index(gen1)
assert match is not None
start1, end1 = match
match = match_standard_transaction_at_any_index(gen2)
assert match is not None
start2, end2 = match
ca1 = CompressorArg(FAKE_BLOCK_HEIGHT1, SerializedProgram.from_bytes(gen1), start1, end1)
ca2 = CompressorArg(FAKE_BLOCK_HEIGHT2, SerializedProgram.from_bytes(gen2), start2, end2)

prefix_len1 = end1 - start1
prefix_len2 = end2 - start2
assert prefix_len1 == prefix_len2
prefix_len = prefix_len1
results = []
for split_offset in range(prefix_len):
gen_args = MultipleCompressorArg([ca1, ca2], split_offset)
spend_bundle: SpendBundle = make_spend_bundle(1)
multi_gen = create_multiple_ref_generator(gen_args, spend_bundle)
cost, result = run_generator(multi_gen)
results.append(result)
assert result is not None
assert cost > 0
assert all(r == results[0] for r in results)

def test_compressed_block_results(self) -> None:
sb: SpendBundle = make_spend_bundle(1)
match = match_standard_transaction_at_any_index(original_generator)
assert match is not None
start, end = match
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
s = simple_solution_generator(sb)
assert c != s
cost_c, result_c = run_generator(c)
cost_s, result_s = run_generator(s)
print()
print(result_c)
assert result_c is not None
assert result_s is not None
print(result_s)
assert result_c == result_s

def test_get_removals_for_single_coin(self) -> None:
sb: SpendBundle = make_spend_bundle(1)
match = match_standard_transaction_at_any_index(original_generator)
assert match is not None
start, end = match
ca = CompressorArg(uint32(0), SerializedProgram.from_bytes(original_generator), start, end)
c = compressed_spend_bundle_solution(ca, sb)
removal = sb.coin_spends[0].coin
spend_info = get_puzzle_and_solution_for_coin(c, removal, 0, test_constants)
assert bytes(spend_info.puzzle) == bytes(sb.coin_spends[0].puzzle_reveal)
assert bytes(spend_info.solution) == bytes(sb.coin_spends[0].solution)
# Test non compressed generator as well
s = simple_solution_generator(sb)
spend_info = get_puzzle_and_solution_for_coin(s, removal, 0, test_constants)
assert bytes(spend_info.puzzle) == bytes(sb.coin_spends[0].puzzle_reveal)
assert bytes(spend_info.solution) == bytes(sb.coin_spends[0].solution)

# test with backrefs (2.0 hard-fork)
s = simple_solution_generator_backrefs(sb)
spend_info = get_puzzle_and_solution_for_coin(s, removal, test_constants.HARD_FORK_HEIGHT + 1, test_constants)
assert Program.from_bytes(bytes(spend_info.puzzle)) == Program.from_bytes(
bytes(sb.coin_spends[0].puzzle_reveal)
)
assert Program.from_bytes(bytes(spend_info.solution)) == Program.from_bytes(bytes(sb.coin_spends[0].solution))


class TestDecompression:
def test_deserialization(self) -> None:
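Most of TestCompression is deleted because it exercised the template compressor. The backref encoding that replaces it can still be round-tripped per coin; this condensed sketch mirrors the backref branch of the removed test above and assumes the same imports and helpers (make_spend_bundle, test_constants, Program, get_puzzle_and_solution_for_coin):

sb = make_spend_bundle(1)
gen = simple_solution_generator_backrefs(sb)
removal = sb.coin_spends[0].coin
# past the hard-fork height, the backref-encoded generator still unpacks into the
# original puzzle reveal and solution
spend_info = get_puzzle_and_solution_for_coin(gen, removal, test_constants.HARD_FORK_HEIGHT + 1, test_constants)
assert Program.from_bytes(bytes(spend_info.puzzle)) == Program.from_bytes(bytes(sb.coin_spends[0].puzzle_reveal))
assert Program.from_bytes(bytes(spend_info.solution)) == Program.from_bytes(bytes(sb.coin_spends[0].solution))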
42 changes: 1 addition & 41 deletions chia/full_node/bundle_tools.py
@@ -5,14 +5,12 @@

from chia_rs import solution_generator, solution_generator_backrefs

from chia.full_node.generator import create_compressed_generator
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.serialized_program import SerializedProgram
from chia.types.coin_spend import CoinSpend
from chia.types.generator_types import BlockGenerator, CompressorArg
from chia.types.generator_types import BlockGenerator
from chia.types.spend_bundle import SpendBundle
from chia.util.byte_types import hexstr_to_bytes
from chia.util.ints import uint32


def simple_solution_generator(bundle: SpendBundle) -> BlockGenerator:
@@ -71,46 +69,8 @@ def puzzle_suitable_for_compression(puzzle: SerializedProgram) -> bool:
return True if match_standard_transaction_exactly_and_return_pubkey(puzzle) else False


def bundle_suitable_for_compression(bundle: SpendBundle) -> bool:
return all(puzzle_suitable_for_compression(coin_spend.puzzle_reveal) for coin_spend in bundle.coin_spends)


def compressed_coin_spend_entry_list(bundle: SpendBundle) -> List[List[List[Union[bytes, None, int, Program]]]]:
compressed_cse_list: List[List[List[Union[bytes, None, int, Program]]]] = []
for coin_spend in bundle.coin_spends:
compressed_cse_list.append(compress_coin_spend(coin_spend))
return compressed_cse_list


def compressed_spend_bundle_solution(original_generator_params: CompressorArg, bundle: SpendBundle) -> BlockGenerator:
compressed_cse_list = compressed_coin_spend_entry_list(bundle)
return create_compressed_generator(original_generator_params, compressed_cse_list)


def best_solution_generator_from_template(previous_generator: CompressorArg, bundle: SpendBundle) -> BlockGenerator:
"""
Creates a compressed block generator, taking in a block that passes the checks below
"""
if bundle_suitable_for_compression(bundle):
return compressed_spend_bundle_solution(previous_generator, bundle)
else:
return simple_solution_generator(bundle)


def detect_potential_template_generator(block_height: uint32, program: SerializedProgram) -> Optional[CompressorArg]:
"""
If this returns a GeneratorArg, that means that the input, `program`, has a standard transaction
that is not compressed that we can use as a template for future blocks.
If it returns None, this block cannot be used.
In this implementation, we store the offsets needed by the compressor in the GeneratorArg
This block will serve as a template for the compression of other newly farmed blocks.
"""

m = match_standard_transaction_at_any_index(bytes(program))
if m is None:
return None
start, end = m
if start and end and end > start >= 0:
return CompressorArg(block_height, program, start, end)
else:
return None
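For reference, the deleted functions composed into one pipeline, summarized in this sketch; the helper name is hypothetical and every call in it is one of the functions removed above (CompressorArg is removed along with them), so none of this exists after the commit:

from typing import Optional


def build_generator_pre_fork(template: Optional[CompressorArg], bundle: SpendBundle) -> BlockGenerator:
    if template is None or not bundle_suitable_for_compression(bundle):
        # no usable template, or a spend the compressor cannot handle
        return simple_solution_generator(bundle)
    # reuse the byte range [template.start:template.end] of an earlier block's
    # generator as a dictionary for this block's coin spends
    return compressed_spend_bundle_solution(template, bundle)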
14 changes: 0 additions & 14 deletions chia/full_node/full_node.py
@@ -44,7 +44,6 @@
from chia.consensus.multiprocess_validation import PreValidationResult
from chia.consensus.pot_iterations import calculate_sp_iters
from chia.full_node.block_store import BlockStore
from chia.full_node.bundle_tools import detect_potential_template_generator
from chia.full_node.coin_store import CoinStore
from chia.full_node.full_node_api import FullNodeAPI
from chia.full_node.full_node_store import FullNodeStore, FullNodeStorePeakResult, UnfinishedBlockEntry
@@ -1479,12 +1478,6 @@ async def peak_post_processing(
f"{len(block.transactions_generator_ref_list) if block.transactions_generator else 'No tx'}"
)

if (
self.full_node_store.previous_generator is not None
and state_change_summary.fork_height < self.full_node_store.previous_generator.block_height
):
self.full_node_store.previous_generator = None

hints_to_add, lookup_coin_ids = get_hints_and_subscription_coin_ids(
state_change_summary,
self.subscriptions.has_coin_subscription,
@@ -1549,13 +1542,6 @@ async def peak_post_processing(
spent_coins: List[bytes32] = [coin_id for coin_id, _ in state_change_summary.removals]
mempool_new_peak_result = await self.mempool_manager.new_peak(self.blockchain.get_tx_peak(), spent_coins)

# Check if we detected a spent transaction, to load up our generator cache
if block.transactions_generator is not None and self.full_node_store.previous_generator is None:
generator_arg = detect_potential_template_generator(block.height, block.transactions_generator)
if generator_arg:
self.log.info(f"Saving previous generator for height {block.height}")
self.full_node_store.previous_generator = generator_arg

return PeakPostProcessingResult(
mempool_new_peak_result.items,
mempool_new_peak_result.removals,
17 changes: 2 additions & 15 deletions chia/full_node/full_node_api.py
@@ -17,11 +17,7 @@
from chia.consensus.block_record import BlockRecord
from chia.consensus.blockchain import BlockchainMutexPriority
from chia.consensus.pot_iterations import calculate_ip_iters, calculate_iterations_quality, calculate_sp_iters
from chia.full_node.bundle_tools import (
best_solution_generator_from_template,
simple_solution_generator,
simple_solution_generator_backrefs,
)
from chia.full_node.bundle_tools import simple_solution_generator, simple_solution_generator_backrefs
from chia.full_node.coin_store import CoinStore
from chia.full_node.fee_estimate import FeeEstimate, FeeEstimateGroup, fee_rate_v2_to_v1
from chia.full_node.fee_estimator_interface import FeeEstimatorInterface
@@ -859,16 +855,7 @@ async def declare_proof_of_space(
if peak.height >= self.full_node.constants.HARD_FORK_HEIGHT:
block_generator = simple_solution_generator_backrefs(spend_bundle)
else:
if self.full_node.full_node_store.previous_generator is not None:
self.log.info(
f"Using previous generator for height "
f"{self.full_node.full_node_store.previous_generator}"
)
block_generator = best_solution_generator_from_template(
self.full_node.full_node_store.previous_generator, spend_bundle
)
else:
block_generator = simple_solution_generator(spend_bundle)
block_generator = simple_solution_generator(spend_bundle)

def get_plot_sig(to_sign: bytes32, _extra: G1Element) -> G2Element:
if to_sign == request.challenge_chain_sp:
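The hunk above is where the change is visible to newly farmed blocks: the cached-template branch disappears and the only decision left is the hard-fork height. A condensed before/after, using the names from that hunk:

# before this commit
if peak.height >= self.full_node.constants.HARD_FORK_HEIGHT:
    block_generator = simple_solution_generator_backrefs(spend_bundle)
elif self.full_node.full_node_store.previous_generator is not None:
    block_generator = best_solution_generator_from_template(
        self.full_node.full_node_store.previous_generator, spend_bundle
    )
else:
    block_generator = simple_solution_generator(spend_bundle)

# after this commit
if peak.height >= self.full_node.constants.HARD_FORK_HEIGHT:
    block_generator = simple_solution_generator_backrefs(spend_bundle)
else:
    block_generator = simple_solution_generator(spend_bundle)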