x86 asm: move rotation and bit instructions in from x86-assembly-cheat

This commit is contained in:
Ciro Santilli 六四事件 法轮功
2019-06-16 00:00:03 +00:00
parent 89084d2332
commit 658ac53d0f
10 changed files with 313 additions and 0 deletions

42
userland/arch/x86_64/bt.S Normal file
View File

@@ -0,0 +1,42 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* BT (Bit Test) copies the selected bit of the operand into CF and
 * modifies nothing else.
 * Test value 0x5A = 0101 1010 binary: bits 1, 3, 4 and 6 set (LSB = bit 0).
 */
/* 0101 1010 */
mov $0x5A, %r12
bt $0, %r12w
/* Bit 0 is clear, so CF = 0. */
LKMC_ASSERT(jnc)
bt $1, %r12w
/* Bit 1 is set, so CF = 1. */
LKMC_ASSERT(jc)
bt $2, %r12w
LKMC_ASSERT(jnc)
bt $3, %r12w
LKMC_ASSERT(jc)
bt $4, %r12w
LKMC_ASSERT(jc)
bt $5, %r12w
LKMC_ASSERT(jnc)
bt $6, %r12w
LKMC_ASSERT(jc)
bt $7, %r12w
LKMC_ASSERT(jnc)
/* The register is unchanged. */
LKMC_ASSERT_EQ(%r12, $0x5A)
#if 0
/* There is no Byte decoding for bt:
 * Error: operand size mismatch for `bt'
 * (BT only takes 16/32/64-bit operands.)
 */
bt $0, %r12b
#endif
LKMC_EPILOGUE

View File

@@ -0,0 +1,16 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* BTC (Bit Test and Complement): copies the selected bit into CF,
 * then flips that bit in the destination.
 */
/* 0101 1010 */
mov $0x5A, %r12
btc $0, %r12
/* Bit 0 was 0 (CF = 0) and is now set: 0x5A -> 0x5B. */
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $0x5B)
/* 0101 1010 */
btc $0, %r12
/* Bit 0 was 1 (CF = 1) and is flipped back: 0x5B -> 0x5A. */
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $0x5A)
LKMC_EPILOGUE

View File

@@ -0,0 +1,16 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* BTR (Bit Test and Reset): copies the selected bit into CF,
 * then clears that bit in the destination.
 */
/* 0101 1010 */
mov $0x5A, %r12
btr $1, %r12
/* Bit 1 was 1 (CF = 1) and is now cleared: 0x5A -> 0x58. */
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $0x58)
/* 0101 1000 */
btr $1, %r12
/* Bit 1 was already 0 (CF = 0); clearing it again is a no-op. */
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $0x58)
LKMC_EPILOGUE

View File

@@ -0,0 +1,38 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
mov $0x81, %r12
/* RCL/RCR rotate *through* CF: the 8-bit operand plus CF form a 9-bit circle.
 * Start from a known CF = 0.
 */
clc
/* 1000 0001 rotated left through CF=0: r12b = 0000 0010, CF = 1 (old bit 7). */
rcl $1, %r12b
/* We'll have to save and restore flags across our asserts!
 * 2x PUSHF to maintain 16-byte stack alignment:
 * one saved copy is consumed by each POPF below, and the spare slot keeps
 * RSP 16-byte aligned while the assert macros run.
 * https://github.com/cirosantilli/linux-kernel-module-cheat#x86_64-calling-convention
 */
pushf
pushf
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $2)
popf
/* CF = 1 restored: 0000 0010 -> r12b = 0000 0101 (5), CF = 0 (old bit 7). */
rcl $1, %r12b
pushf
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $5)
popf
/* Two 9-bit right rotations undo the two left ones: r12b = 1000 0001, CF = 0. */
rcr $2, %r12b
pushf
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $0x81)
popf
/* 1000 0001 rotated right through CF=0: r12b = 0100 0000, CF = 1 (old bit 0). */
rcr $1, %r12b
pushf
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $0x40)
/* Drop the two flag images still on the stack
 * (the final PUSHF plus the initial spare one).
 */
add $16, %rsp
LKMC_EPILOGUE

View File

@@ -0,0 +1,27 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
mov $0x81, %r12
/* ROL/ROR rotate within the operand only (CF is not part of the circle);
 * CF receives the last bit rotated around.
 * 1000 0001 rotated left: r12b = 0000 0011, CF = 1 (old bit 7).
 */
rol $1, %r12b
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $3)
/* 0000 0011 rotated left: r12b = 0000 0110, CF = 0 (old bit 7). */
rol $1, %r12b
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $6)
/* Rotating right twice undoes the two left rotations:
 * r12b = 1000 0001, CF = 1 (the last bit rotated, old bit 1).
 */
ror $2, %r12b
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $0x81)
/* 1000 0001 rotated right: r12b = 1100 0000, CF = 1 (old bit 0). */
ror $1, %r12b
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $0x0C0)
LKMC_EPILOGUE

View File

@@ -0,0 +1,24 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* 0xFF == -1 in 2's complement with 8-bits. */
mov $0xFF, %r12
/* SAL by 1 (implicit count): -1 * 2 == -2; CF = old sign bit = 1. */
sal %r12b
LKMC_ASSERT(jc)
/* 0xFE == -2 in 2's complement with 8-bits. */
LKMC_ASSERT_EQ(%r12, $0xFE)
/* SAR by 1: arithmetic right shift replicates the sign bit,
 * so -2 / 2 == -1; CF = old bit 0 = 0.
 */
sar %r12b
LKMC_ASSERT(jnc)
/* -1 */
LKMC_ASSERT_EQ(%r12, $0xFF)
/* SAR rounds to -infinity: -1 goes to -1 again. CF = old bit 0 = 1. */
sar %r12b
LKMC_ASSERT(jc)
/* -1 */
LKMC_ASSERT_EQ(%r12, $0xFF)
LKMC_EPILOGUE

View File

@@ -0,0 +1,26 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* SETcc stores 1 in its byte operand if the condition holds, else 0.
 * Preload 0xFF so we can see the byte really gets overwritten.
 */
mov $0xFF, %r12
/* Set Carry flag. */
stc
/* Check for carry flag. */
setc %r12b
/* Carry flag was set, so set the r12b to 1. */
LKMC_ASSERT_EQ(%r12, $1)
/* Clear carry flag. */
clc
/* CF clear, so r12b is set to 0. */
setc %r12b
LKMC_ASSERT_EQ(%r12, $0)
#if 0
/* The operand size can only be one byte:
 * Error: operand size mismatch for `setc'
 */
setc %eax
#endif
LKMC_EPILOGUE

View File

@@ -0,0 +1,43 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-shift-and-rotate-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
mov $0x81, %r12
/* Shift left by one. SHL/SHR shift zeros in; CF = last bit shifted out.
 * 1000 0001 -> 0000 0010, CF = 1.
 */
shl %r12b
LKMC_ASSERT(jc)
LKMC_ASSERT_EQ(%r12, $2)
/* Shift left by one. 0000 0010 -> 0000 0100, CF = 0. */
shl %r12b
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $4)
/* Shift right by one. 0000 0100 -> 0000 0010, CF = 0. */
shr %r12b
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $2)
/* Shift left by 2 immediate.
 * Different encoding than shift by 1.
 */
shl $2, %r12b
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $8)
/* Shift left by 2 in cl register: the only register form of the count. */
mov $2, %cl
shl %cl, %r12b
LKMC_ASSERT(jnc)
LKMC_ASSERT_EQ(%r12, $0x20)
#if 0
/* cl is the only possible register choice
 * Error: operand type mismatch for `shr'
 */
shr %bl, %ax
#endif
LKMC_EPILOGUE

View File

@@ -0,0 +1,24 @@
/* https://github.com/cirosantilli/linux-kernel-module-cheat#x86-bit-and-byte-instructions */
#include <lkmc.h>
LKMC_PROLOGUE
/* TEST performs a bitwise AND, sets the flags, and discards the result. */
/* 0xF0 & 0x00 == 0x00 */
mov $0xF0, %r12
test $0, %r12b
/* The comparison was equal 0. */
LKMC_ASSERT(jz)
/* r12 is unchanged. */
LKMC_ASSERT_EQ(%r12, $0x0F0)
/* 0xF0 & 0x18 == 0x10 != 0x00 */
mov $0xF0, %r12
test $0x18, %r12b
LKMC_ASSERT(jnz)
LKMC_ASSERT_EQ(%r12, $0x0F0)
/* test %rax, %rax vs cmp $0, %rax: test produces a shorter
 * encoding to decide if a register equals zero or not.
 * http://stackoverflow.com/questions/147173/x86-assembly-testl-eax-against-eax
 */
LKMC_EPILOGUE