Mirror of https://github.com/Motorhead1991/qemu.git (synced 2025-08-01 23:03:54 -06:00)
softfloat: Introduce sh[lr]_double primitives
Have x86_64 assembly for them, with a fallback. This avoids shuffling
values through %cl in the x86 case.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 5ffb6bd9c4
commit 463e45dcb4
2 changed files with 115 additions and 23 deletions
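The %cl remark in the message refers to baseline x86-64's variable-count shifts (shl, shr, and the double-word shld/shrd) taking a runtime count only in the %cl register; the "ci" operand constraint in the patch tells the compiler the count may either already sit in %cl or be an immediate. The portable form of the same operation needs two variable counts, c and 64 - c, each of which would otherwise be routed through %cl in turn. A minimal sketch of that generic form for context (the helper name is mine, not from the patch):

#include <stdint.h>

/* Portable double-word left shift: two variable-count shifts, so a
 * baseline x86-64 compiler must place first c and then 64 - c into
 * %cl.  The single shld in the patch consumes the count once.  The
 * c == 0 case must be special-cased because r >> 64 is undefined
 * behavior in C. */
static uint64_t shl_double_generic(uint64_t l, uint64_t r, int c)
{
    return c ? (l << c) | (r >> (64 - c)) : l;
}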
@@ -85,6 +85,42 @@ this code that are retained.
 #include "fpu/softfloat-types.h"
+#include "qemu/host-utils.h"
+
+/**
+ * shl_double: double-word merging left shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @l left by @c bits, shifting in bits from @r.
+ */
+static inline uint64_t shl_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+    asm("shld %b2, %1, %0" : "+r"(l) : "r"(r), "ci"(c));
+    return l;
+#else
+    return c ? (l << c) | (r >> (64 - c)) : l;
+#endif
+}
+
+/**
+ * shr_double: double-word merging right shift
+ * @l: left or most-significant word
+ * @r: right or least-significant word
+ * @c: shift count
+ *
+ * Shift @r right by @c bits, shifting in bits from @l.
+ */
+static inline uint64_t shr_double(uint64_t l, uint64_t r, int c)
+{
+#if defined(__x86_64__)
+    asm("shrd %b2, %1, %0" : "+r"(r) : "r"(l), "ci"(c));
+    return r;
+#else
+    return c ? (r >> c) | (l << (64 - c)) : r;
+#endif
+}
+
 /*----------------------------------------------------------------------------
 | Shifts `a' right by the number of bits given in `count'.  If any nonzero
 | bits are shifted off, they are ``jammed'' into the least significant bit of