From caf963c588ed2e35432a9c167bf6bbaa07d99971 Mon Sep 17 00:00:00 2001
From: "hf-security-analysis[bot]" <265538906+hf-security-analysis[bot]@users.noreply.github.com>
Date: Wed, 13 May 2026 07:34:31 +0000
Subject: [PATCH] fix(security): remediate workflow vulnerability in .github/workflows/serge_review.yml

---
 .github/workflows/serge_review.yml | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/.github/workflows/serge_review.yml b/.github/workflows/serge_review.yml
index 692cb488b95c..9ad00516dbb6 100644
--- a/.github/workflows/serge_review.yml
+++ b/.github/workflows/serge_review.yml
@@ -57,6 +57,18 @@ jobs:
         # are wiped for parity with the hardening in claude_review.yml.
         run: rm -rf .ai/ .claude/ CLAUDE.md
 
+      - name: Sanitize comment body
+        id: sanitize
+        env:
+          # Untrusted input must reach the shell via env, never via ${{ }} inside run:
+          COMMENT_BODY: ${{ github.event.comment.body }}
+        run: |
+          # Reject comments with adversarial prompt injection phrases
+          if printf '%s' "$COMMENT_BODY" | grep -qiE '(ignore (previous|all) (instructions?|rules?|prompts?)|disregard (the )?(above|previous)|you are now|new (instructions?|rules?)|system:? |<\|im_start\||<\|im_end\||### (Instruction|System))'; then
+            echo "Potential prompt injection detected in comment" >&2
+            exit 1
+          fi
+
       - uses: tarekziade/ai-reviewer@main
         with:
           llm_api_key: ${{ secrets.ANTHROPIC_API_KEY }}