Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
134 changes: 134 additions & 0 deletions paimon-python/pypaimon/read/reader/partial_update_merge_function.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################

"""
Python port of Java's ``PartialUpdateMergeFunction``
(``paimon-core/src/main/java/org/apache/paimon/mergetree/compact/
PartialUpdateMergeFunction.java``).

The merge function used by the ``partial-update`` merge engine on PK
tables: rows sharing a primary key are merged left-to-right, taking the
latest non-null value per non-PK field. ``DeduplicateMergeFunction``
keeps only the latest row; ``PartialUpdateMergeFunction`` instead lets
later writes "fill in" fields the earlier writes left null, so users
can write the same logical record across multiple commits with
different sets of non-null columns.

This is the **core merge semantics only**. The Java implementation also
supports per-field aggregator overrides (``fields.<name>.aggregate-
function``), sequence groups (``fields.<name>.sequence-group``),
``ignore-delete``, and ``partial-update.remove-record-on-*`` options.
None of those are implemented yet; non-INSERT row kinds raise
``NotImplementedError`` at ``add`` time so we never silently corrupt
data with a half-implemented contract.
"""

from typing import Any, List, Optional

from pypaimon.table.row.key_value import KeyValue
from pypaimon.table.row.row_kind import RowKind


class PartialUpdateMergeFunction:
    """Merge function for the ``partial-update`` merge engine on PK tables.

    Rows sharing a primary key are folded oldest-to-newest; for every
    non-PK column the latest non-null value wins, so later writes can
    fill in columns that earlier writes left null.

    Follows the ``MergeFunction`` protocol expected by
    ``SortMergeReader``: call ``reset`` before each group of same-key
    rows, ``add`` once per row (oldest first), then ``get_result`` once
    the group is exhausted.
    """

    def __init__(self, key_arity: int, value_arity: int,
                 nullables: Optional[List[bool]] = None):
        if nullables is not None and len(nullables) != value_arity:
            raise ValueError(
                "nullables length {} does not match value_arity {}".format(
                    len(nullables), value_arity))
        self._key_arity = key_arity
        self._value_arity = value_arity
        # Optional per-value-field NOT-NULL flags, parallel to value
        # indices. ``None`` disables the check for direct callers that
        # have no schema info; when present, a null written to a NOT
        # NULL field raises, mirroring Java's ``updateNonNullFields``.
        self._nullables = nullables
        # ``None`` until the first add() of a group — doubles as the
        # "no rows seen yet" marker consulted by get_result().
        self._accumulator: Optional[List[Any]] = None
        # Most recently added kv. Only its key and sequence_number are
        # read, and both are copied into a fresh tuple in get_result()
        # so the returned row never aliases upstream's reused KeyValue.
        self._latest_kv: Optional[KeyValue] = None

    def reset(self) -> None:
        """Forget all state so the next add() starts a new key group."""
        self._latest_kv = None
        self._accumulator = None

    def add(self, kv: KeyValue) -> None:
        """Fold one row (oldest-to-newest order) into the accumulator.

        Raises ``NotImplementedError`` for non-INSERT row kinds and
        ``ValueError`` when a null arrives on a NOT NULL field.
        """
        kind_byte = kv.value_row_kind_byte
        if not RowKind.is_add_byte(kind_byte):
            # Retractions need Java's ignore-delete /
            # partial-update.remove-record-on-delete handling, which is
            # not ported yet — fail loudly instead of dropping the row.
            raise NotImplementedError(
                "PartialUpdateMergeFunction received a {} row; this "
                "Python port does not yet implement the ignore-delete / "
                "partial-update.remove-record-on-delete options. Use the "
                "Java client for tables that produce DELETE / "
                "UPDATE_BEFORE rows.".format(RowKind(kind_byte).to_string())
            )

        acc = self._accumulator
        if acc is None:
            # Equivalent of Java's ``new GenericRow(arity)``: all-null.
            acc = [None] * self._value_arity
            self._accumulator = acc
        nullables = self._nullables
        for idx in range(self._value_arity):
            field = kv.value.get_field(idx)
            if field is not None:
                acc[idx] = field
            elif nullables is not None and not nullables[idx]:
                # Mirrors Java's IllegalArgumentException check; nulls
                # on nullable fields are simply absorbed.
                raise ValueError("Field {} can not be null".format(idx))
        self._latest_kv = kv

    def get_result(self) -> Optional[KeyValue]:
        """Return the merged row for the current group, or ``None`` if
        no row was added since the last reset()."""
        latest = self._latest_kv
        if latest is None or self._accumulator is None:
            return None

        # Copy the key fields out of ``latest`` now: upstream readers
        # (e.g. KeyValueWrapReader) recycle one KeyValue and mutate its
        # backing row_tuple between calls, so holding a reference would
        # let later iteration corrupt this result.
        key_part = tuple(
            latest.key.get_field(i) for i in range(self._key_arity)
        )
        merged_row = (key_part
                      + (latest.sequence_number, RowKind.INSERT.value)
                      + tuple(self._accumulator))

        result = KeyValue(self._key_arity, self._value_arity)
        result.replace(merged_row)
        return result
10 changes: 8 additions & 2 deletions paimon-python/pypaimon/read/reader/sort_merge_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,15 @@
class SortMergeReaderWithMinHeap(RecordReader):
"""SortMergeReader implemented with min-heap."""

def __init__(self, readers: List[RecordReader[KeyValue]], schema: TableSchema):
def __init__(self, readers: List[RecordReader[KeyValue]], schema: TableSchema,
merge_function: Optional[Any] = None):
self.next_batch_readers = list(readers)
self.merge_function = DeduplicateMergeFunction()
# Default to dedupe so callers that don't pass a merge_function
# keep their old behaviour. The merge engine dispatch lives in
# ``MergeFileSplitRead.section_reader_supplier`` for the read
# path; tests or other ad-hoc callers can pass a different
# implementation here.
self.merge_function = merge_function if merge_function is not None else DeduplicateMergeFunction()

if schema.partition_keys:
trimmed_primary_keys = [pk for pk in schema.primary_keys if pk not in schema.partition_keys]
Expand Down
107 changes: 104 additions & 3 deletions paimon-python/pypaimon/read/split_read.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from functools import partial
from typing import Callable, List, Optional, Tuple

from pypaimon.common.options.core_options import CoreOptions
from pypaimon.common.options.core_options import CoreOptions, MergeEngine
from pypaimon.common.predicate import Predicate
from pypaimon.deletionvectors import ApplyDeletionVectorReader
from pypaimon.deletionvectors.deletion_vector import DeletionVector
Expand Down Expand Up @@ -54,7 +54,10 @@
KeyValueUnwrapRecordReader
from pypaimon.read.reader.key_value_wrap_reader import KeyValueWrapReader
from pypaimon.read.reader.shard_batch_reader import ShardBatchReader
from pypaimon.read.reader.sort_merge_reader import SortMergeReaderWithMinHeap
from pypaimon.read.reader.partial_update_merge_function import \
PartialUpdateMergeFunction
from pypaimon.read.reader.sort_merge_reader import (DeduplicateMergeFunction,
SortMergeReaderWithMinHeap)
from pypaimon.read.push_down_utils import _get_all_fields
from pypaimon.read.split import Split
from pypaimon.read.sliced_split import SlicedSplit
Expand Down Expand Up @@ -98,6 +101,10 @@ def __init__(
self.split = split
self.row_tracking_enabled = row_tracking_enabled
self.value_arity = len(read_type)
# Snapshot the raw value-side schema before _create_key_value_fields
# wraps it, so MergeFileSplitRead can hand per-value-field nullable
# flags to merge functions that mirror Java's NOT-NULL check.
self.value_fields = list(read_type)

self.trimmed_primary_key = self.table.trimmed_primary_keys
self.read_fields = read_type
Expand Down Expand Up @@ -482,7 +489,101 @@ def section_reader_supplier(self, section: List[SortedRun]) -> RecordReader:
supplier = partial(self.kv_reader_supplier, file, self.deletion_file_readers.get(file.file_name, None))
data_readers.append(supplier)
readers.append(ConcatRecordReader(data_readers))
return SortMergeReaderWithMinHeap(readers, self.table.table_schema)
merge_function = self._build_merge_function()
return SortMergeReaderWithMinHeap(
readers, self.table.table_schema, merge_function=merge_function)

def _build_merge_function(self):
    """Resolve the table's ``merge-engine`` option to a MergeFunction.

    ``DEDUPLICATE`` (the historical default on the Python read path)
    and ``PARTIAL_UPDATE`` are supported. The remaining engines
    (``AGGREGATE`` / ``FIRST_ROW``) used to silently degrade to dedupe
    — which quietly produced wrong data — so they now raise an explicit
    ``NotImplementedError`` until they are ported.

    For ``PARTIAL_UPDATE`` we additionally refuse to run when the table
    configures options whose semantics this port does not implement
    (sequence-group, per-field aggregator overrides, ignore-delete and
    friends): silently ignoring them would produce subtly wrong
    results, the very anti-pattern this dispatch exists to close.
    """
    merge_engine = self.table.options.merge_engine()

    if merge_engine == MergeEngine.DEDUPLICATE:
        return DeduplicateMergeFunction()

    if merge_engine != MergeEngine.PARTIAL_UPDATE:
        raise NotImplementedError(
            "merge-engine '{}' is not implemented in pypaimon yet "
            "(supported: deduplicate, partial-update). Use the Java "
            "client or open an issue to track support.".format(
                merge_engine.value)
        )

    blocked = self._partial_update_unsupported_options()
    if blocked:
        raise NotImplementedError(
            "merge-engine 'partial-update' is enabled together "
            "with options that pypaimon does not yet implement: "
            "{}. The supported subset is per-key last-non-null "
            "merge with no sequence-group, no per-field "
            "aggregator override, no ignore-delete and no "
            "partial-update.remove-record-on-* flags. Use the "
            "Java client for the full feature set, or open an "
            "issue to track Python support.".format(
                ", ".join(sorted(blocked)))
        )
    return PartialUpdateMergeFunction(
        key_arity=len(self.trimmed_primary_key),
        value_arity=self.value_arity,
        nullables=[f.type.nullable for f in self.value_fields],
    )

Comment thread
TheR1sing3un marked this conversation as resolved.
# Boolean-valued options that, when truthy, opt the table into
# behaviour the Python PartialUpdateMergeFunction does not implement.
# Mirrors org.apache.paimon.CoreOptions and the fallback keys in
# PartialUpdateMergeFunction.java.
_PARTIAL_UPDATE_UNSUPPORTED_BOOLEAN_OPTIONS = (
    "ignore-delete",
    "partial-update.ignore-delete",
    "first-row.ignore-delete",
    "deduplicate.ignore-delete",
    "partial-update.remove-record-on-delete",
    "partial-update.remove-record-on-sequence-group",
)
# Key fragments used by _partial_update_unsupported_options to spot
# per-field options (``fields.<name>.sequence-group`` /
# ``fields.<name>.aggregate-function``) and the table-wide default
# aggregator key, none of which are implemented in this port yet.
_FIELDS_PREFIX = "fields."
_FIELD_SEQUENCE_GROUP_SUFFIX = ".sequence-group"
_FIELD_AGGREGATE_FUNCTION_SUFFIX = ".aggregate-function"
_DEFAULT_AGGREGATE_FUNCTION_KEY = "fields.default-aggregate-function"

def _partial_update_unsupported_options(self):
    """Collect the option keys configured on this table that
    ``PartialUpdateMergeFunction`` cannot honour yet.

    An empty set means the simple last-non-null merge is safe to run.
    """
    option_map = self.table.options.options.to_map()
    flagged = set()
    for name, raw in option_map.items():
        if name in self._PARTIAL_UPDATE_UNSUPPORTED_BOOLEAN_OPTIONS:
            # Boolean flags only matter when actually switched on.
            if self._option_is_truthy(raw):
                flagged.add(name)
            continue
        if name == self._DEFAULT_AGGREGATE_FUNCTION_KEY:
            flagged.add(name)
            continue
        if name.startswith(self._FIELDS_PREFIX) and name.endswith(
                (self._FIELD_SEQUENCE_GROUP_SUFFIX,
                 self._FIELD_AGGREGATE_FUNCTION_SUFFIX)):
            flagged.add(name)
    return flagged

@staticmethod
def _option_is_truthy(raw):
    """Coerce a raw option value (bool, str, None, ...) to a flag.

    Strings count as true only for the usual spellings ("true", "1",
    "yes", "on"), case-insensitively and ignoring surrounding
    whitespace; ``None`` is false; anything else falls back to
    ``bool(raw)``.
    """
    if isinstance(raw, bool):
        return raw
    if raw is None:
        return False
    if isinstance(raw, str):
        return raw.strip().lower() in {"true", "1", "yes", "on"}
    return bool(raw)

def create_reader(self) -> RecordReader:
# Create a dict mapping data file name to deletion file reader method
Expand Down
Loading
Loading