Skip to content

Commit

Permalink
Fix undefined behaviour warnings in ir_fold.h (#99)
Browse files Browse the repository at this point in the history
See php/php-src#17430

I changed the adds, subs, muls to use unsigned arithmetic because on
two's-complement systems that produces the same bit pattern as signed
arithmetic but without potential UB warnings. This essentially makes the
wrapping behaviour defined. I only did this for the 32- and 64-bit types
because for the 8- and 16-bit types the operands undergo integer promotion
to `int` before the operation, which avoids the issue.

There is also the check `val.i64 < 0 && val.i64 - 1 < 0` that I changed.
Since the first condition already establishes `val.i64 < 0`, and signed
overflow is undefined behaviour, the compiler may assume `val.i64 - 1`
cannot wrap and therefore fold the second condition to always-true —
silently discarding the intended guard against `INT64_MIN`. The check is
now written as `val.i64 != INT64_MIN`, which expresses that guard directly.
  • Loading branch information
nielsdos authored Jan 14, 2025
1 parent 65be1d8 commit e5b2d5a
Showing 1 changed file with 25 additions and 25 deletions.
50 changes: 25 additions & 25 deletions ir_fold.h
Original file line number Diff line number Diff line change
Expand Up @@ -323,13 +323,13 @@ IR_FOLD(ADD(C_I16, C_I16))
IR_FOLD(ADD(C_I32, C_I32))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 4 && IR_OPT_TYPE(opt) == IR_ADDR));
IR_FOLD_CONST_I(op1_insn->val.i32 + op2_insn->val.i32);
IR_FOLD_CONST_I(op1_insn->val.u32 + op2_insn->val.u32);
}

IR_FOLD(ADD(C_I64, C_I64))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type || (sizeof(void*) == 8 && IR_OPT_TYPE(opt) == IR_ADDR));
IR_FOLD_CONST_I(op1_insn->val.i64 + op2_insn->val.i64);
IR_FOLD_CONST_I(op1_insn->val.u64 + op2_insn->val.u64);
}

IR_FOLD(ADD(C_DOUBLE, C_DOUBLE))
Expand Down Expand Up @@ -393,13 +393,13 @@ IR_FOLD(SUB(C_I16, C_I16))
IR_FOLD(SUB(C_I32, C_I32))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
IR_FOLD_CONST_I(op1_insn->val.i32 - op2_insn->val.i32);
IR_FOLD_CONST_I(op1_insn->val.u32 - op2_insn->val.u32);
}

IR_FOLD(SUB(C_I64, C_I64))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
IR_FOLD_CONST_I(op1_insn->val.i64 - op2_insn->val.i64);
IR_FOLD_CONST_I(op1_insn->val.u64 - op2_insn->val.u64);
}

IR_FOLD(SUB(C_DOUBLE, C_DOUBLE))
Expand Down Expand Up @@ -463,13 +463,13 @@ IR_FOLD(MUL(C_I16, C_I16))
IR_FOLD(MUL(C_I32, C_I32))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
IR_FOLD_CONST_I(op1_insn->val.i32 * op2_insn->val.i32);
IR_FOLD_CONST_I(op1_insn->val.u32 * op2_insn->val.u32);
}

IR_FOLD(MUL(C_I64, C_I64))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
IR_FOLD_CONST_I(op1_insn->val.i64 * op2_insn->val.i64);
IR_FOLD_CONST_I(op1_insn->val.u64 * op2_insn->val.u64);
}

IR_FOLD(MUL(C_DOUBLE, C_DOUBLE))
Expand Down Expand Up @@ -556,7 +556,7 @@ IR_FOLD(NEG(C_I32))
IR_FOLD(NEG(C_I64))
{
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
IR_FOLD_CONST_I(-op1_insn->val.i64);
IR_FOLD_CONST_I(-op1_insn->val.u64);
}

IR_FOLD(NEG(C_DOUBLE))
Expand All @@ -580,7 +580,7 @@ IR_FOLD(ABS(C_I64))
if (op1_insn->val.i64 >= 0) {
IR_FOLD_COPY(op1);
} else {
IR_FOLD_CONST_I(-op1_insn->val.i64);
IR_FOLD_CONST_I(-op1_insn->val.u64);
}
}

Expand Down Expand Up @@ -680,7 +680,7 @@ IR_FOLD(MUL_OV(C_I64, C_I64))
int64_t min = - max - 1;
int64_t res;
IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
res = op1_insn->val.i64 * op2_insn->val.i64;
res = op1_insn->val.u64 * op2_insn->val.u64;
if (op1_insn->val.i64 != 0 && res / op1_insn->val.i64 != op2_insn->val.i64 && res >= min && res <= max) {
IR_FOLD_NEXT;
}
Expand Down Expand Up @@ -2518,7 +2518,7 @@ IR_FOLD(ADD(ADD, C_I64))
{
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x + c1) + c2 => x + (c1 + c2) */
val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
op1 = op1_insn->op1;
op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
IR_FOLD_RESTART;
Expand Down Expand Up @@ -2556,8 +2556,8 @@ IR_FOLD(ADD(SUB, C_I64))
{
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x - c1) + c2 => x + (c2 - c1) */
val.i64 = op2_insn->val.i64 - ctx->ir_base[op1_insn->op2].val.i64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
val.i64 = op2_insn->val.u64 - ctx->ir_base[op1_insn->op2].val.u64;
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
opt++; /* ADD -> SUB */
}
Expand All @@ -2566,7 +2566,7 @@ IR_FOLD(ADD(SUB, C_I64))
IR_FOLD_RESTART;
} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
/* (c1 - x) + c2 => (c1 + c2) - x */
val.i64 = ctx->ir_base[op1_insn->op1].val.i64 + op2_insn->val.i64;
val.i64 = ctx->ir_base[op1_insn->op1].val.u64 + op2_insn->val.u64;
opt++; /* ADD -> SUB */
op2 = op1_insn->op2;
op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
Expand Down Expand Up @@ -2599,8 +2599,8 @@ IR_FOLD(SUB(ADD, C_I64))
{
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x + c1) - c2 => x + (c1 - c2) */
val.i64 = ctx->ir_base[op1_insn->op2].val.i64 - op2_insn->val.i64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
val.i64 = ctx->ir_base[op1_insn->op2].val.u64 - op2_insn->val.u64;
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
} else {
opt--; /* SUB -> ADD */
Expand Down Expand Up @@ -2635,7 +2635,7 @@ IR_FOLD(SUB(C_I64, ADD))
{
if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
/* c1 - (x + c2) => (c1 - c2) - x */
val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op2].val.i64;
val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op2].val.u64;
op2 = op2_insn->op1;
op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
IR_FOLD_RESTART;
Expand All @@ -2652,7 +2652,7 @@ IR_FOLD(SUB(SUB, C_ADDR))
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x - c1) - c2 => x - (c1 + c2) */
val.u64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
opt--; /* SUB -> ADD */
}
Expand All @@ -2676,8 +2676,8 @@ IR_FOLD(SUB(SUB, C_I64))
{
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x - c1) - c2 => x - (c1 + c2) */
val.i64 = ctx->ir_base[op1_insn->op2].val.i64 + op2_insn->val.i64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
opt--; /* SUB -> ADD */
}
Expand All @@ -2686,7 +2686,7 @@ IR_FOLD(SUB(SUB, C_I64))
IR_FOLD_RESTART;
} else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
/* (c1 - x) - c2 => (c1 - c2) - x */
val.i64 = ctx->ir_base[op1_insn->op1].val.i64 - op2_insn->val.i64;
val.i64 = ctx->ir_base[op1_insn->op1].val.u64 - op2_insn->val.u64;
op2 = op1_insn->op2;
op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
IR_FOLD_RESTART;
Expand All @@ -2709,7 +2709,7 @@ IR_FOLD(SUB(C_ADDR, SUB))
} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
/* c1 - (c2 - x) => x + (c1 - c2) */
val.u64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
opt++; /* ADD -> SUB */
}
Expand All @@ -2727,14 +2727,14 @@ IR_FOLD(SUB(C_I64, SUB))
{
if (IR_IS_CONST_REF(op2_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op2].op)) {
/* c1 - (x - c2) => (c1 + c2) - x */
val.i64 = op1_insn->val.i64 + ctx->ir_base[op2_insn->op2].val.i64;
val.i64 = op1_insn->val.u64 + ctx->ir_base[op2_insn->op2].val.u64;
op2 = op2_insn->op1;
op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
IR_FOLD_RESTART;
} else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
/* c1 - (c2 - x) => x + (c1 - c2) */
val.i64 = op1_insn->val.i64 - ctx->ir_base[op2_insn->op1].val.i64;
if (val.i64 < 0 && val.i64 - 1 < 0) {
val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op1].val.u64;
if (val.i64 < 0 && val.i64 != INT64_MIN) {
val.i64 = -val.i64;
opt++; /* ADD -> SUB */
}
Expand Down Expand Up @@ -2768,7 +2768,7 @@ IR_FOLD(MUL(MUL, C_I64))
{
if (IR_IS_CONST_REF(op1_insn->op2) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op2].op)) {
/* (x * c1) * c2 => x * (c1 * c2) */
val.i64 = ctx->ir_base[op1_insn->op2].val.i64 * op2_insn->val.i64;
val.i64 = ctx->ir_base[op1_insn->op2].val.u64 * op2_insn->val.u64;
op1 = op1_insn->op1;
op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
IR_FOLD_RESTART;
Expand Down

0 comments on commit e5b2d5a

Please sign in to comment.