Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
19e8dee
feat: Add metadata-only replace API to Table for REPLACE snapshot ope…
qzyu999 Mar 9, 2026
6800bb4
chore: fix linter and mypy errors in replace API and tests
qzyu999 Mar 9, 2026
e5e11b9
test: fix invalid operation assertion and add license headers
qzyu999 Mar 9, 2026
070c885
chore: Remove comment for linter
qzyu999 Mar 9, 2026
f12fa5d
test: fix invalid operation assertion by using model_construct to byp…
qzyu999 Mar 9, 2026
94bd87e
fix: Remove replace as a public function for the Transaction and Tabl…
qzyu999 Mar 27, 2026
a2f2b18
test: Update tests to reflect stricter requirements for REPLACE
qzyu999 Mar 27, 2026
356d704
test: Fix import errors and mypy exception
qzyu999 Mar 28, 2026
87f6848
test: Fix syntax error
qzyu999 Mar 28, 2026
41eb549
test: Fix cast errors
qzyu999 Mar 28, 2026
a91dfb4
style: Run linter
qzyu999 Mar 28, 2026
ef9b84f
fix: Update the snapshot_id for _RewriteFiles._deleted_entries to mar…
qzyu999 Mar 28, 2026
596df80
feat: Update the _RewriteFiles._commit to correctly raise errors in c…
qzyu999 Mar 28, 2026
33aaef0
test: Update test_replace_passes_through_delete_manifests to account …
qzyu999 Mar 28, 2026
b0a770c
refactor: refactor _existing_manifests into _SnapshotProducer for _Ov…
qzyu999 Apr 15, 2026
59f555e
style: Linter fix for docstring
qzyu999 Apr 15, 2026
c8162a8
feat: update _deleted_entries in _RewriteFiles to use the @cached_pro…
qzyu999 Apr 16, 2026
d939b67
test: update test_replace_internally to add sequence number check alo…
qzyu999 Apr 16, 2026
c3570d8
fix: add missing branch arg to _RewriteFiles and a corresponding test
qzyu999 Apr 16, 2026
d7e89db
fix: lint error
qzyu999 Apr 16, 2026
9681ec3
refactor: update the test_replace.py to use a reusable _create_dummy_…
qzyu999 Apr 16, 2026
8f1f9b9
refactor: update _get_existing_manifests to avoid hardcoding for the …
qzyu999 Apr 16, 2026
c60d5ad
fix: remove repeated docstring in _RewriteFiles and rely on documenta…
qzyu999 Apr 16, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion pyiceberg/table/snapshots.py
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,7 @@ def _partition_summary(self, update_metrics: UpdateMetrics) -> str:


def update_snapshot_summaries(summary: Summary, previous_summary: Mapping[str, str] | None = None) -> Summary:
if summary.operation not in {Operation.APPEND, Operation.OVERWRITE, Operation.DELETE}:
if summary.operation not in {Operation.APPEND, Operation.OVERWRITE, Operation.DELETE, Operation.REPLACE}:
raise ValueError(f"Operation not implemented: {summary.operation}")

if not previous_summary:
Expand Down
154 changes: 111 additions & 43 deletions pyiceberg/table/update/snapshot.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,48 @@ def _calculate_added_rows(self, manifests: list[ManifestFile]) -> int:
added_rows += manifest.added_rows_count
return added_rows

def _get_existing_manifests(self, should_use_manifest_pruning: bool = False) -> list[ManifestFile]:
    """Filter existing manifests and rewrite those containing deleted data files."""
    kept: list[ManifestFile] = []
    # Lazily-built partition evaluators, keyed by partition spec id.
    # Only consulted when pruning is requested (primarily for Overwrite).
    evaluators: dict[int, Callable[[ManifestFile], bool]] = KeyDefaultDict(self._build_manifest_evaluator)

    snapshot = self._transaction.table_metadata.snapshot_by_name(name=self._target_branch)
    if snapshot is None:
        # No current snapshot on the target branch: nothing to carry over.
        return kept

    for manifest in snapshot.manifests(io=self._io):
        # With pruning enabled, a manifest whose partition summary cannot
        # match any of the deleted files is carried over untouched.
        if should_use_manifest_pruning and not evaluators[manifest.partition_spec_id](manifest):
            kept.append(manifest)
            continue

        survivors: list[ManifestEntry] = []
        has_deletions = False

        for entry in manifest.fetch_manifest_entry(io=self._io, discard_deleted=True):
            if entry.data_file in self._deleted_data_files:
                has_deletions = True
            else:
                survivors.append(entry)

        if not has_deletions:
            # Manifest is untouched by this operation: keep it as-is.
            kept.append(manifest)
        elif survivors:
            # Rewrite the manifest with only the surviving entries,
            # re-marking them as EXISTING while preserving their lineage.
            with self.new_manifest_writer(self.spec(manifest.partition_spec_id)) as writer:
                for entry in survivors:
                    writer.add_entry(
                        ManifestEntry.from_args(
                            status=ManifestEntryStatus.EXISTING,
                            snapshot_id=entry.snapshot_id,
                            sequence_number=entry.sequence_number,
                            file_sequence_number=entry.file_sequence_number,
                            data_file=entry.data_file,
                        )
                    )
            kept.append(writer.to_manifest_file())
        # else: every entry in this manifest was deleted — drop it entirely.

    return kept

# Each concrete producer returns the DELETED-status manifest entries for the
# data files it removes (may be empty when nothing is deleted).
@abstractmethod
def _deleted_entries(self) -> list[ManifestEntry]: ...

Expand Down Expand Up @@ -585,49 +627,7 @@ class _OverwriteFiles(_SnapshotProducer["_OverwriteFiles"]):

def _existing_manifests(self) -> list[ManifestFile]:
    """Determine if there are any existing manifest files.

    Delegates to the shared ``_get_existing_manifests`` implementation with
    manifest pruning enabled: manifests whose partition summaries cannot
    match any deleted file are kept without being rewritten.
    """
    # The pre-refactor inline body was left above the new delegation line,
    # making the delegation unreachable dead code; only the delegation is kept.
    return self._get_existing_manifests(should_use_manifest_pruning=True)

def _deleted_entries(self) -> list[ManifestEntry]:
"""To determine if we need to record any deleted entries.
Expand Down Expand Up @@ -667,6 +667,65 @@ def _get_entries(manifest: ManifestFile) -> list[ManifestEntry]:
return []


class _RewriteFiles(_SnapshotProducer["_RewriteFiles"]):
"""A snapshot producer that rewrites data files."""

def _commit(self) -> UpdatesAndRequirements:
# Only produce a commit when there is something to rewrite
if self._deleted_data_files or self._added_data_files:
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we can replicate the _DeleteFiles logic here by using the @cached_property decorator on the `_compute_deletes` function. Especially since _commit() calls self._deleted_entries() for validation and then calls the super commit to write and get the delete entries.

def _compute_deletes(self) -> tuple[list[ManifestFile], list[ManifestEntry], bool]:

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @geruh, great suggestion, I've applied the changes in c8162a8.

# Grab the entries that we actually found in the table's manifests
deleted_entries = self._deleted_entries()
found_deleted_files = {entry.data_file for entry in deleted_entries}

# If the user asked to delete files that aren't in the table, abort.
if len(found_deleted_files) != len(self._deleted_data_files):
raise ValueError("Cannot delete files that are not present in the table")

added_records = sum(f.record_count for f in self._added_data_files)
deleted_records = sum(entry.data_file.record_count for entry in deleted_entries)

if added_records > deleted_records:
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Where are you seeing this invariant? I mean this seems correct since the spec says rewrite must be "logically equivalent". This check could be reasonable as a safety guard, but what happens when delete file rewriting is added? Then these numbers could be incorrect.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @geruh, thanks for flagging this, you're right that this is a safety guard, but it doesn't yet factor in future changes when adding delete file rewriting. Should we add something like this?

# Note: This physical record count invariant is a sanity guard for data file 
# compaction to ensure no data is accidentally duplicated or invented. 
# TODO: This will need to be evolved into a logical record count validation 
# once PyIceberg supports rewriting delete files (Merge-on-Read).
added_records = sum(f.record_count for f in self._added_data_files)
deleted_records = sum(entry.data_file.record_count for entry in deleted_entries)

if added_records > deleted_records:
    raise ValueError(f"Invalid replace: records added ({added_records}) exceeds records removed ({deleted_records})")

This logical record count validation would involve something like having the _commit method to do the following, which the codebase currently cannot do:

  • Identify associated Delete Files: For every DataFile you are deleting, you would need to find every Position Delete or Equality Delete file that points to it.
  • Calculate the "Subtraction": You would need to subtract those delete row counts from the physical record_count of the old files to find the Old Logical Count.
  • Compare: You would then verify that Old Logical Count == New Logical Count.
    The current _RewriteFiles implementation is "blind" to deletes. It only tracks _added_data_files and _deleted_data_files.
    I believe this can be part of a full MoR implementation, something that I would love to work on after finishing these maintenance tasks.

Otherwise, I can also remove it from _RewriteFiles and move forward, WDYT?

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay let's keep the check I took a deeper look into the snapshot producer on the java side so let's align closer to that:

https://github.com/apache/iceberg/blob/dde712ec9ed6c9d28183ee4615d50f97b246af5d/core/src/main/java/org/apache/iceberg/SnapshotProducer.java#L322-L334

raise ValueError(f"Invalid replace: records added ({added_records}) exceeds records removed ({deleted_records})")

return super()._commit()
else:
return (), ()

@cached_property
def _cached_deleted_entries(self) -> list[ManifestEntry]:
    """Check if we need to mark the files as deleted."""
    # Without a parent snapshot there is nothing to mark as deleted.
    if self._parent_snapshot_id is None:
        return []

    parent = self._transaction.table_metadata.snapshot_by_id(self._parent_snapshot_id)
    if parent is None:
        raise ValueError(f"Could not find the previous snapshot: {self._parent_snapshot_id}")

    def _mark_deleted(manifest: ManifestFile) -> list[ManifestEntry]:
        # Re-emit matching DATA entries as DELETED under this snapshot's id,
        # preserving their sequence numbers.
        return [
            ManifestEntry.from_args(
                status=ManifestEntryStatus.DELETED,
                snapshot_id=self.snapshot_id,
                sequence_number=entry.sequence_number,
                file_sequence_number=entry.file_sequence_number,
                data_file=entry.data_file,
            )
            for entry in manifest.fetch_manifest_entry(self._io, discard_deleted=True)
            if entry.data_file.content == DataFileContent.DATA and entry.data_file in self._deleted_data_files
        ]

    # Scan the parent snapshot's manifests in parallel, then flatten.
    per_manifest = ExecutorFactory.get_or_create().map(_mark_deleted, parent.manifests(self._io))
    return [entry for batch in per_manifest for entry in batch]

def _deleted_entries(self) -> list[ManifestEntry]:
    # Delegates to the cached property so repeated calls within a single
    # commit compute the deleted entries only once.
    return self._cached_deleted_entries

def _existing_manifests(self) -> list[ManifestFile]:
    # Rewrites do not prune by partition predicate, so use the shared
    # implementation with its default (no manifest pruning).
    return self._get_existing_manifests()
Comment on lines +722 to +726
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: Looks like these doc strings were copy pasta'd over from the other classes, and don't fit how they are used here. Either we can remove them or change to fit their usage.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @geruh, thanks for pointing this out, these doc strings in _RewriteFiles have been removed in c60d5ad.



class UpdateSnapshot:
_transaction: Transaction
_io: FileIO
Expand Down Expand Up @@ -724,6 +783,15 @@ def delete(self) -> _DeleteFiles:
snapshot_properties=self._snapshot_properties,
)

def replace(self) -> _RewriteFiles:
return _RewriteFiles(
Copy link
Copy Markdown
Member

@geruh geruh Apr 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm sort of confused by the naming since we are introducing a user facing API replace but the underlying snapshot operation is a rewrite? We should rename to rewrite() for consistency? Unless I'm missing something?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @geruh, you bring up a good point, and it's something I noticed seemed off along the way. The reason why we have this discrepancy is because we're mirroring what's found in the Java code itself.

I named the Python API replace() to accurately reflect the Operation.REPLACE snapshot string it generates, while keeping the internal class named _RewriteFiles to match the Java builder logic.

That said, if you feel strongly about matching the Java API's user-facing method (rewrite()) rather than the snapshot operation (replace()), I'm happy to rename the public method to rewrite() for consistency. Let me know what you prefer!

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah there is a bit of a distinction here, since rewrite is basically the rewrite of data files and replace is the logical change to your snapshot metadata. My thinking is that the users in java today are used to interacting with this api through:

table.newRewrite()
    .deleteFile(old)
    .addFile(new)
    .commit();

So someone coming from Java Iceberg will look for rewrite, not replace. But ultimately maybe there is more history as to why it follows this naming convention that I'm missing.

WDYT @kevinjqliu?

operation=Operation.REPLACE,
transaction=self._transaction,
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I noticed that branch is missing here is there a reason for that?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hi @geruh, thanks for noticing, this was an oversight. The changes have been made in c3570d8 and a test has been added for this.

io=self._io,
branch=self._branch,
snapshot_properties=self._snapshot_properties,
)


class _ManifestMergeManager(Generic[U]):
_target_size_bytes: int
Expand Down
Loading
Loading