/*
   Unix SMB/CIFS implementation.
   SMB Byte handling
   Copyright (C) Andrew Tridgell 1992-1998

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#ifndef _BYTEORDER_H
#define _BYTEORDER_H

/*
   This file implements macros for machine independent short and
   int manipulation.

   Here is a description of this file that I emailed to the samba list once:

   > I am confused about the way that byteorder.h works in Samba. I have
   > looked at it, and I would have thought that you might make a distinction
   > between LE and BE machines, but you only seem to distinguish between 386
   > and all other architectures.
   >
   > Can you give me a clue?

   sure.

   The distinction between 386 and other architectures is only there as
   an optimisation. You can take it out completely and it will make no
   difference. The routines (macros) in byteorder.h are totally byteorder
   independent. The 386 optimisation just takes advantage of the fact that
   the x86 processors don't care about alignment, so we don't have to
   align ints on int boundaries etc. If there are other processors out
   there that aren't alignment sensitive then you could also define
   CAREFUL_ALIGNMENT=0 on those processors as well.

   Ok, now to the macros themselves. I'll take a simple example: say we
   want to extract a 2 byte integer from a SMB packet and put it into a
   type called uint16_t that is in the local machine's byte order, and we
   want to do it with only the assumption that uint16_t is _at_least_ 16
   bits long (this last condition is very important for architectures
   that don't have any int types that are 2 bytes long).

   You do this:

   #define CVAL(buf,pos) (((uint8_t *)(buf))[pos])
   #define PVAL(buf,pos) ((unsigned int)CVAL(buf,pos))
   #define SVAL(buf,pos) (PVAL(buf,pos)|PVAL(buf,(pos)+1)<<8)

   then to extract a uint16_t value at offset 25 in a buffer you do this:

   char *buffer = foo_bar();
   uint16_t xx = SVAL(buffer,25);

   We are using the byteorder independence of the ANSI C bitshifts to do
   the work. A good optimising compiler should turn this into efficient
   code, especially if it happens to have the right byteorder :-)

   I know these macros can be made a bit tidier by removing some of the
   casts, but you need to look at byteorder.h as a whole to see the
   reasoning behind them. byteorder.h defines the following macros:

   SVAL(buf,pos) - extract a 2 byte SMB value
   IVAL(buf,pos) - extract a 4 byte SMB value
   BVAL(buf,pos) - extract an 8 byte SMB value
   SVALS(buf,pos) - signed version of SVAL()
   IVALS(buf,pos) - signed version of IVAL()
   BVALS(buf,pos) - signed version of BVAL()

   SSVAL(buf,pos,val) - put a 2 byte SMB value into a buffer
   SIVAL(buf,pos,val) - put a 4 byte SMB value into a buffer
   SBVAL(buf,pos,val) - put an 8 byte SMB value into a buffer
   SSVALS(buf,pos,val) - signed version of SSVAL()
   SIVALS(buf,pos,val) - signed version of SIVAL()
   SBVALS(buf,pos,val) - signed version of SBVAL()

   RSVAL(buf,pos) - like SVAL() but for NMB byte ordering
   RSVALS(buf,pos) - like SVALS() but for NMB byte ordering
   RIVAL(buf,pos) - like IVAL() but for NMB byte ordering
   RIVALS(buf,pos) - like IVALS() but for NMB byte ordering
   RBVAL(buf,pos) - like BVAL() but for NMB byte ordering
   RBVALS(buf,pos) - like BVALS() but for NMB byte ordering
   RSSVAL(buf,pos,val) - like SSVAL() but for NMB ordering
   RSSVALS(buf,pos,val) - like SSVALS() but for NMB ordering
   RSIVAL(buf,pos,val) - like SIVAL() but for NMB ordering
   RSIVALS(buf,pos,val) - like SIVALS() but for NMB ordering
   RSBVAL(buf,pos,val) - like SBVAL() but for NMB ordering
   RSBVALS(buf,pos,val) - like SBVALS() but for NMB ordering

   it also defines lots of intermediate macros, just ignore those :-)
*/
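
/*
   For illustration only, a minimal usage sketch with a hypothetical
   buffer and offsets (not taken from any real SMB packet layout):

     uint8_t pkt[8] = {0};

     SSVAL(pkt, 0, 0x1234);      stores pkt[0]=0x34, pkt[1]=0x12
     SIVAL(pkt, 2, 0xdeadbeef);  stores the bytes ef be ad de at offsets 2..5

     uint16_t s = SVAL(pkt, 0);  s == 0x1234 regardless of host byte order
     uint32_t i = IVAL(pkt, 2);  i == 0xdeadbeef regardless of host byte order

   The store macros write the least significant byte first and the extract
   macros read the bytes back the same way, so a value survives the round
   trip unchanged whatever the byte order of the machine running the code.
*/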


/*
  on powerpc we can use the byte-reversing load/store instructions
  (lhbrx/sthbrx, lwbrx/stwbrx) to read and write little-endian values
  directly
*/
#if (defined(__powerpc__) && defined(__GNUC__))
/* lhbrx: byte-reversed 16 bit load */
static __inline__ uint16_t ld_le16(const uint16_t *addr)
{
	uint16_t val;
	__asm__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* sthbrx: byte-reversed 16 bit store */
static __inline__ void st_le16(uint16_t *addr, const uint16_t val)
{
	__asm__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}

/* lwbrx: byte-reversed 32 bit load */
static __inline__ uint32_t ld_le32(const uint32_t *addr)
{
	uint32_t val;
	__asm__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
	return val;
}

/* stwbrx: byte-reversed 32 bit store */
static __inline__ void st_le32(uint32_t *addr, const uint32_t val)
{
	__asm__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
}
#define HAVE_ASM_BYTEORDER 1
#else
#define HAVE_ASM_BYTEORDER 0
#endif
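
/*
   Illustration (not from the original source): on a big-endian PowerPC
   host, ld_le16() applied to the two memory bytes 0x34 0x12 returns
   0x1234, and st_le16(p, 0x1234) writes them back as 0x34 0x12 - the
   byte-reversing loads and stores produce the same little-endian wire
   format as the portable shift-based macros below.
*/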

#undef CAREFUL_ALIGNMENT

/* we know that the 386 can handle misalignment and has the "right"
   byteorder */
#if defined(__i386__)
#define CAREFUL_ALIGNMENT 0
#endif

#ifndef CAREFUL_ALIGNMENT
#define CAREFUL_ALIGNMENT 1
#endif

#define CVAL(buf,pos) ((unsigned int)(((const uint8_t *)(buf))[pos]))
#define CVAL_NC(buf,pos) (((uint8_t *)(buf))[pos]) /* Non-const version of CVAL */
#define PVAL(buf,pos) (CVAL(buf,pos))
#define SCVAL(buf,pos,val) (CVAL_NC(buf,pos) = (val))

#if HAVE_ASM_BYTEORDER

#define _PTRPOS(buf,pos) (((const uint8_t *)(buf))+(pos))
#define SVAL(buf,pos) ld_le16((const uint16_t *)_PTRPOS(buf,pos))
#define IVAL(buf,pos) ld_le32((const uint32_t *)_PTRPOS(buf,pos))
#define SSVAL(buf,pos,val) st_le16((uint16_t *)_PTRPOS(buf,pos), val)
#define SIVAL(buf,pos,val) st_le32((uint32_t *)_PTRPOS(buf,pos), val)
#define SVALS(buf,pos) ((int16_t)SVAL(buf,pos))
#define IVALS(buf,pos) ((int32_t)IVAL(buf,pos))
#define SSVALS(buf,pos,val) SSVAL((buf),(pos),((int16_t)(val)))
#define SIVALS(buf,pos,val) SIVAL((buf),(pos),((int32_t)(val)))

#elif CAREFUL_ALIGNMENT

#define SVAL(buf,pos) (PVAL(buf,pos)|PVAL(buf,(pos)+1)<<8)
#define IVAL(buf,pos) (SVAL(buf,pos)|SVAL(buf,(pos)+2)<<16)
#define SSVALX(buf,pos,val) (CVAL_NC(buf,pos)=(uint8_t)((val)&0xFF),CVAL_NC(buf,pos+1)=(uint8_t)((val)>>8))
#define SIVALX(buf,pos,val) (SSVALX(buf,pos,val&0xFFFF),SSVALX(buf,pos+2,val>>16))
#define SVALS(buf,pos) ((int16_t)SVAL(buf,pos))
#define IVALS(buf,pos) ((int32_t)IVAL(buf,pos))
#define SSVAL(buf,pos,val) SSVALX((buf),(pos),((uint16_t)(val)))
#define SIVAL(buf,pos,val) SIVALX((buf),(pos),((uint32_t)(val)))
#define SSVALS(buf,pos,val) SSVALX((buf),(pos),((int16_t)(val)))
#define SIVALS(buf,pos,val) SIVALX((buf),(pos),((int32_t)(val)))
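
/*
   Worked example (illustrative bytes only): with CAREFUL_ALIGNMENT the
   2 and 4 byte values are assembled one byte at a time, so for a buffer
   containing the bytes 78 56 34 12 starting at pos:

     SVAL(buf,pos) == 0x78 | (0x56 << 8)        == 0x5678
     IVAL(buf,pos) == 0x5678 | (0x1234 << 16)   == 0x12345678

   and SSVALX()/SIVALX() store the bytes back in the same least
   significant byte first order, so no unaligned or host-endian memory
   access is ever made.
*/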

#else /* not CAREFUL_ALIGNMENT */

/* this handles things for architectures like the 386 that can handle
   unaligned accesses */
/*
   WARNING: This section is dependent on the length of int16_t and int32_t
   being correct
*/

/* get single value from an SMB buffer */
#define SVAL(buf,pos) (*(const uint16_t *)((const char *)(buf) + (pos)))
#define SVAL_NC(buf,pos) (*(uint16_t *)((void *)((char *)(buf) + (pos)))) /* Non-const version of above. */
#define IVAL(buf,pos) (*(const uint32_t *)((const char *)(buf) + (pos)))
#define IVAL_NC(buf,pos) (*(uint32_t *)((void *)((char *)(buf) + (pos)))) /* Non-const version of above. */
#define SVALS(buf,pos) (*(const int16_t *)((const char *)(buf) + (pos)))
#define SVALS_NC(buf,pos) (*(int16_t *)((void *)((char *)(buf) + (pos)))) /* Non-const version of above. */
#define IVALS(buf,pos) (*(const int32_t *)((const char *)(buf) + (pos)))
#define IVALS_NC(buf,pos) (*(int32_t *)((void *)((char *)(buf) + (pos)))) /* Non-const version of above. */

/* store single value in an SMB buffer */
#define SSVAL(buf,pos,val) SVAL_NC(buf,pos)=((uint16_t)(val))
#define SIVAL(buf,pos,val) IVAL_NC(buf,pos)=((uint32_t)(val))
#define SSVALS(buf,pos,val) SVALS_NC(buf,pos)=((int16_t)(val))
#define SIVALS(buf,pos,val) IVALS_NC(buf,pos)=((int32_t)(val))

#endif /* not CAREFUL_ALIGNMENT */

/* 64 bit macros */
#define BVAL(p, ofs) (IVAL(p,ofs) | (((uint64_t)IVAL(p,(ofs)+4)) << 32))
#define BVALS(p, ofs) ((int64_t)BVAL(p,ofs))
#define SBVAL(p, ofs, v) (SIVAL(p,ofs,(v)&0xFFFFFFFF), SIVAL(p,(ofs)+4,((uint64_t)(v))>>32))
#define SBVALS(p, ofs, v) (SBVAL(p,ofs,(uint64_t)v))
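
/*
   Illustration (hypothetical value): the 64 bit macros are built from two
   32 bit accesses, low word first, so SBVAL(p, ofs, 0x1122334455667788ULL)
   stores the bytes 88 77 66 55 44 33 22 11 and BVAL(p, ofs) reads them
   back as 0x1122334455667788 on any host.
*/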

/* now the reverse routines - these are used in nmb packets (mostly) */
#define SREV(x) ((((x)&0xFF)<<8) | (((x)>>8)&0xFF))
#define IREV(x) ((SREV(x)<<16) | (SREV((x)>>16)))
#define BREV(x) ((IREV(x)<<32) | (IREV((x)>>32)))

#define RSVAL(buf,pos) SREV(SVAL(buf,pos))
#define RSVALS(buf,pos) SREV(SVALS(buf,pos))
#define RIVAL(buf,pos) IREV(IVAL(buf,pos))
#define RIVALS(buf,pos) IREV(IVALS(buf,pos))
#define RBVAL(buf,pos) BREV(BVAL(buf,pos))
#define RBVALS(buf,pos) BREV(BVALS(buf,pos))
#define RSSVAL(buf,pos,val) SSVAL(buf,pos,SREV(val))
#define RSSVALS(buf,pos,val) SSVALS(buf,pos,SREV(val))
#define RSIVAL(buf,pos,val) SIVAL(buf,pos,IREV(val))
#define RSIVALS(buf,pos,val) SIVALS(buf,pos,IREV(val))
#define RSBVAL(buf,pos,val) SBVAL(buf,pos,BREV(val))
#define RSBVALS(buf,pos,val) SBVALS(buf,pos,BREV(val))
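
/*
   Illustration (hypothetical bytes): the R* macros read and write the
   opposite, big-endian ("network") order used by NMB. For a buffer
   holding the bytes 12 34 at pos:

     SVAL(buf,pos)  == 0x3412   (little-endian SMB order)
     RSVAL(buf,pos) == 0x1234   (big-endian NMB order)
*/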

/* Alignment macros. */
#define ALIGN4(p,base) ((p) + ((4 - (PTR_DIFF((p), (base)) & 3)) & 3))
#define ALIGN2(p,base) ((p) + ((2 - (PTR_DIFF((p), (base)) & 1)) & 1))
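
/*
   For example (illustrative pointers): if p is 5 bytes past base,
   PTR_DIFF(p, base) & 3 == 1, so ALIGN4(p, base) advances p by 3 bytes
   to the next offset that is a multiple of 4 from base; if p is already
   aligned it is returned unchanged.
*/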


/* macros for accessing SMB protocol elements */
#define VWV(vwv) ((vwv)*2)
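
/*
   For example, VWV(2) == 4: the byte offset of the third 16 bit word
   value, so a hypothetical parameter-word block vwv could be read with
   SVAL(vwv, VWV(2)).
*/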

#endif /* _BYTEORDER_H */