I wrote a C++ library called Celero designed to test just such optimizations and alternatives. (Shameless self promotion: https://github.com/DigitalInBlue/Celero)
I ran your cases using the following code:
// Benchmark fixture: four equivalent implementations of
// "return p1 >= p2 when greater is set, p1 <= p2 otherwise",
// plus a no-op used as the measurement baseline.
// (The empty user-declared constructor was removed — the implicitly
// generated default constructor is identical; Rule of Zero.)
class StackOverflowFixture : public celero::TestFixture
{
public:
    /// Baseline no-op: measures only harness + rand() overhead.
    /// Parameters are unnamed because they are deliberately ignored
    /// (avoids -Wunused-parameter noise).
    inline bool NoOp(bool /*greater*/, int /*p1*/, int /*p2*/)
    {
        return true;
    }

    /// Straightforward branching version.
    inline bool Compare(bool greater, int p1, int p2)
    {
        // "greater == true" simplified to "greater": comparing a bool
        // against true is redundant.
        if(greater)
        {
            return p1 >= p2;
        }

        return p1 <= p2;
    }

    /// Branchless version: evaluates both comparisons up front and
    /// selects one by indexing with the bool (false -> 0, true -> 1).
    inline bool Compare2(bool greater, int p1, int p2)
    {
        const bool ret[2] = {p1 <= p2, p1 >= p2};
        return ret[greater];
    }

    /// Branchless version via normalised XOR: (!greater != !(p1 <= p2))
    /// is true when exactly one of the two holds; OR-ing in p1 == p2
    /// patches up the equality case.
    inline bool Compare3(bool greater, int p1, int p2)
    {
        return (!greater != !(p1 <= p2)) | (p1 == p2);
    }

    /// Branchless version using bitwise XOR on the bools directly;
    /// same equality patch-up as Compare3.
    inline bool Compare4(bool greater, int p1, int p2)
    {
        return (greater ^ (p1 <= p2)) | (p1 == p2);
    }
};
// Baseline: 100 samples of 5,000,000 calls each. DoNotOptimizeAway keeps
// the compiler from discarding the NoOp call (and its three rand() calls),
// so this measures the fixed per-call overhead every benchmark below shares.
BASELINE_F(StackOverflow, Baseline, StackOverflowFixture, 100, 5000000)
{
celero::DoNotOptimizeAway(NoOp(rand()%2, rand(), rand()));
}
// Benchmarks the branching implementation; same sample/call counts as the
// baseline so the reported ratio isolates the comparison work itself.
BENCHMARK_F(StackOverflow, Compare, StackOverflowFixture, 100, 5000000)
{
celero::DoNotOptimizeAway(Compare(rand()%2, rand(), rand()));
}
// Benchmarks the array-indexing (branchless) implementation.
BENCHMARK_F(StackOverflow, Compare2, StackOverflowFixture, 100, 5000000)
{
celero::DoNotOptimizeAway(Compare2(rand()%2, rand(), rand()));
}
// Benchmarks the normalised-XOR (branchless) implementation.
BENCHMARK_F(StackOverflow, Compare3, StackOverflowFixture, 100, 5000000)
{
celero::DoNotOptimizeAway(Compare3(rand()%2, rand(), rand()));
}
// Benchmarks the bitwise-XOR (branchless) implementation.
BENCHMARK_F(StackOverflow, Compare4, StackOverflowFixture, 100, 5000000)
{
celero::DoNotOptimizeAway(Compare4(rand()%2, rand(), rand()));
}
The results are shown below:
[==========]
[ CELERO ]
[==========]
[ STAGE ] Baselining
[==========]
[ RUN ] StackOverflow.Baseline -- 100 samples, 5000000 calls per run.
[ DONE ] StackOverflow.Baseline (0.690499 sec) [5000000 calls in 690499 usec] [0.138100 us/call] [7241140.103027 calls/sec]
[==========]
[ STAGE ] Benchmarking
[==========]
[ RUN ] StackOverflow.Compare -- 100 samples, 5000000 calls per run.
[ DONE ] StackOverflow.Compare (0.782818 sec) [5000000 calls in 782818 usec] [0.156564 us/call] [6387180.672902 calls/sec]
[ BASELINE ] StackOverflow.Compare 1.133699
[ RUN ] StackOverflow.Compare2 -- 100 samples, 5000000 calls per run.
[ DONE ] StackOverflow.Compare2 (0.700767 sec) [5000000 calls in 700767 usec] [0.140153 us/call] [7135039.178500 calls/sec]
[ BASELINE ] StackOverflow.Compare2 1.014870
[ RUN ] StackOverflow.Compare3 -- 100 samples, 5000000 calls per run.
[ DONE ] StackOverflow.Compare3 (0.709471 sec) [5000000 calls in 709471 usec] [0.141894 us/call] [7047504.408214 calls/sec]
[ BASELINE ] StackOverflow.Compare3 1.027476
[ RUN ] StackOverflow.Compare4 -- 100 samples, 5000000 calls per run.
[ DONE ] StackOverflow.Compare4 (0.712940 sec) [5000000 calls in 712940 usec] [0.142588 us/call] [7013212.893091 calls/sec]
[ BASELINE ] StackOverflow.Compare4 1.032500
[==========]
[ COMPLETE ]
[==========]
Given this test — Compare2's baseline ratio of 1.0149 is the closest to 1.0, meaning it adds the least overhead beyond the no-op baseline — it looks like Compare2 is the best option for this micro-optimization.
EDIT:
Compare2 Assembly (The best case):
cmp r8d, r9d
movzx eax, dl
setle BYTE PTR ret$[rsp]
cmp r8d, r9d
setge BYTE PTR ret$[rsp+1]
movzx eax, BYTE PTR ret$[rsp+rax]
Compare3 Assembly (The next-best case):
xor r11d, r11d
cmp r8d, r9d
mov r10d, r11d
setg r10b
test dl, dl
mov ecx, r11d
sete cl
mov eax, r11d
cmp ecx, r10d
setne al
cmp r8d, r9d
sete r11b
or eax, r11d