/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVUTIL_INTREADWRITE_H
#define AVUTIL_INTREADWRITE_H

#include <stdint.h>
#include "libavutil/avconfig.h"
#include "attributes.h"
#include "bswap.h"

typedef union {
    uint64_t u64;
    uint32_t u32[2];
    uint16_t u16[4];
    uint8_t  u8 [8];
    double   f64;
    float    f32[2];
} av_alias av_alias64;

typedef union {
    uint32_t u32;
    uint16_t u16[2];
    uint8_t  u8 [4];
    float    f32;
} av_alias av_alias32;

typedef union {
    uint16_t u16;
    uint8_t  u8 [2];
} av_alias av_alias16;
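
/*
 * Illustrative sketch (not part of the API): these unions carry the
 * av_alias (may_alias) attribute so that pointers to them may alias data
 * of other types, which the access macros below rely on when casting raw
 * byte pointers. They can also be used directly for bit-level punning:
 *
 *     av_alias32 a;
 *     a.f32 = 1.0f;
 *     uint32_t bits = a.u32;   // 0x3f800000 on IEEE-754 hosts
 */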

/*
 * Arch-specific headers can provide any combination of
 * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
 * Preprocessor symbols must be defined, even if these are implemented
 * as inline functions.
 */
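
/*
 * Sketch of what a per-arch header might supply (names here are purely
 * hypothetical): an optimized implementation may be an inline function,
 * but the macro name must still be defined so the generic fallbacks
 * below are skipped:
 *
 *     static av_always_inline uint32_t my_arch_rb32(const uint8_t *p)
 *     {
 *         // arch-specific unaligned load + byte swap
 *     }
 *     #define AV_RB32(p) my_arch_rb32((const uint8_t *)(p))
 */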

#ifdef HAVE_AV_CONFIG_H

#include "config.h"

#if   ARCH_ARM
#   include "arm/intreadwrite.h"
#elif ARCH_AVR32
#   include "avr32/intreadwrite.h"
#elif ARCH_MIPS
#   include "mips/intreadwrite.h"
#elif ARCH_PPC
#   include "ppc/intreadwrite.h"
#elif ARCH_TOMI
#   include "tomi/intreadwrite.h"
#elif ARCH_X86
#   include "x86/intreadwrite.h"
#endif

#endif /* HAVE_AV_CONFIG_H */

/*
 * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
 */
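
/*
 * For example (sketch): on a big-endian target whose arch header provides
 * only AV_RB32, the block below supplies the native-order alias as
 *
 *     #define AV_RN32(p) AV_RB32(p)
 *
 * and, conversely, fills in AV_RB32 when only AV_RN32 was provided.
 */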

#if AV_HAVE_BIGENDIAN

#   if defined(AV_RN16) && !defined(AV_RB16)
#       define AV_RB16(p) AV_RN16(p)
#   elif !defined(AV_RN16) && defined(AV_RB16)
#       define AV_RN16(p) AV_RB16(p)
#   endif

#   if defined(AV_WN16) && !defined(AV_WB16)
#       define AV_WB16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) && defined(AV_WB16)
#       define AV_WN16(p, v) AV_WB16(p, v)
#   endif

#   if defined(AV_RN24) && !defined(AV_RB24)
#       define AV_RB24(p) AV_RN24(p)
#   elif !defined(AV_RN24) && defined(AV_RB24)
#       define AV_RN24(p) AV_RB24(p)
#   endif

#   if defined(AV_WN24) && !defined(AV_WB24)
#       define AV_WB24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) && defined(AV_WB24)
#       define AV_WN24(p, v) AV_WB24(p, v)
#   endif

#   if defined(AV_RN32) && !defined(AV_RB32)
#       define AV_RB32(p) AV_RN32(p)
#   elif !defined(AV_RN32) && defined(AV_RB32)
#       define AV_RN32(p) AV_RB32(p)
#   endif

#   if defined(AV_WN32) && !defined(AV_WB32)
#       define AV_WB32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) && defined(AV_WB32)
#       define AV_WN32(p, v) AV_WB32(p, v)
#   endif

#   if defined(AV_RN64) && !defined(AV_RB64)
#       define AV_RB64(p) AV_RN64(p)
#   elif !defined(AV_RN64) && defined(AV_RB64)
#       define AV_RN64(p) AV_RB64(p)
#   endif

#   if defined(AV_WN64) && !defined(AV_WB64)
#       define AV_WB64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) && defined(AV_WB64)
#       define AV_WN64(p, v) AV_WB64(p, v)
#   endif

#else /* AV_HAVE_BIGENDIAN */

#   if defined(AV_RN16) && !defined(AV_RL16)
#       define AV_RL16(p) AV_RN16(p)
#   elif !defined(AV_RN16) && defined(AV_RL16)
#       define AV_RN16(p) AV_RL16(p)
#   endif

#   if defined(AV_WN16) && !defined(AV_WL16)
#       define AV_WL16(p, v) AV_WN16(p, v)
#   elif !defined(AV_WN16) && defined(AV_WL16)
#       define AV_WN16(p, v) AV_WL16(p, v)
#   endif

#   if defined(AV_RN24) && !defined(AV_RL24)
#       define AV_RL24(p) AV_RN24(p)
#   elif !defined(AV_RN24) && defined(AV_RL24)
#       define AV_RN24(p) AV_RL24(p)
#   endif

#   if defined(AV_WN24) && !defined(AV_WL24)
#       define AV_WL24(p, v) AV_WN24(p, v)
#   elif !defined(AV_WN24) && defined(AV_WL24)
#       define AV_WN24(p, v) AV_WL24(p, v)
#   endif

#   if defined(AV_RN32) && !defined(AV_RL32)
#       define AV_RL32(p) AV_RN32(p)
#   elif !defined(AV_RN32) && defined(AV_RL32)
#       define AV_RN32(p) AV_RL32(p)
#   endif

#   if defined(AV_WN32) && !defined(AV_WL32)
#       define AV_WL32(p, v) AV_WN32(p, v)
#   elif !defined(AV_WN32) && defined(AV_WL32)
#       define AV_WN32(p, v) AV_WL32(p, v)
#   endif

#   if defined(AV_RN64) && !defined(AV_RL64)
#       define AV_RL64(p) AV_RN64(p)
#   elif !defined(AV_RN64) && defined(AV_RL64)
#       define AV_RN64(p) AV_RL64(p)
#   endif

#   if defined(AV_WN64) && !defined(AV_WL64)
#       define AV_WL64(p, v) AV_WN64(p, v)
#   elif !defined(AV_WN64) && defined(AV_WL64)
#       define AV_WN64(p, v) AV_WL64(p, v)
#   endif

#endif /* !AV_HAVE_BIGENDIAN */

/*
 * Define AV_[RW]N helper macros to simplify definitions not provided
 * by per-arch headers.
 */
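
/*
 * Rough illustration of the expansion (GCC path, assuming no arch-specific
 * override is in effect):
 *
 *     AV_RN32(p)  ->  AV_RN(32, p)  ->  ((const union unaligned_32 *)(p))->l
 *
 * i.e. the compiler is told the access may be unaligned and emits the best
 * load the target supports.
 */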

#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)

union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;

#   define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
#   define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))

#elif defined(__DECC)

#   define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
#   define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))

#elif AV_HAVE_FAST_UNALIGNED

#   define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
#   define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#else

#ifndef AV_RB16
#   define AV_RB16(x)                  \
    ((((const uint8_t*)(x))[0] << 8) | \
      ((const uint8_t*)(x))[1])
#endif
#ifndef AV_WB16
#   define AV_WB16(p, d) do {    \
        ((uint8_t*)(p))[1] = (d);    \
        ((uint8_t*)(p))[0] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RL16
#   define AV_RL16(x)                  \
    ((((const uint8_t*)(x))[1] << 8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL16
#   define AV_WL16(p, d) do {    \
        ((uint8_t*)(p))[0] = (d);    \
        ((uint8_t*)(p))[1] = (d)>>8; \
    } while(0)
#endif

#ifndef AV_RB32
#   define AV_RB32(x)                             \
    (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
               (((const uint8_t*)(x))[1] << 16) | \
               (((const uint8_t*)(x))[2] <<  8) | \
                ((const uint8_t*)(x))[3])
#endif
#ifndef AV_WB32
#   define AV_WB32(p, d) do {     \
        ((uint8_t*)(p))[3] = (d);     \
        ((uint8_t*)(p))[2] = (d)>>8;  \
        ((uint8_t*)(p))[1] = (d)>>16; \
        ((uint8_t*)(p))[0] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RL32
#   define AV_RL32(x)                             \
    (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
               (((const uint8_t*)(x))[2] << 16) | \
               (((const uint8_t*)(x))[1] <<  8) | \
                ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL32
#   define AV_WL32(p, d) do {     \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
    } while(0)
#endif

#ifndef AV_RB64
#   define AV_RB64(x)                             \
    (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[6] <<  8) | \
      (uint64_t)((const uint8_t*)(x))[7])
#endif
#ifndef AV_WB64
#   define AV_WB64(p, d) do {     \
        ((uint8_t*)(p))[7] = (d);     \
        ((uint8_t*)(p))[6] = (d)>>8;  \
        ((uint8_t*)(p))[5] = (d)>>16; \
        ((uint8_t*)(p))[4] = (d)>>24; \
        ((uint8_t*)(p))[3] = (d)>>32; \
        ((uint8_t*)(p))[2] = (d)>>40; \
        ((uint8_t*)(p))[1] = (d)>>48; \
        ((uint8_t*)(p))[0] = (d)>>56; \
    } while(0)
#endif

#ifndef AV_RL64
#   define AV_RL64(x)                             \
    (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
     ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
     ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
     ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
     ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
     ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
     ((uint64_t)((const uint8_t*)(x))[1] <<  8) | \
      (uint64_t)((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL64
#   define AV_WL64(p, d) do {     \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
        ((uint8_t*)(p))[3] = (d)>>24; \
        ((uint8_t*)(p))[4] = (d)>>32; \
        ((uint8_t*)(p))[5] = (d)>>40; \
        ((uint8_t*)(p))[6] = (d)>>48; \
        ((uint8_t*)(p))[7] = (d)>>56; \
    } while(0)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RN(s, p)    AV_RB##s(p)
#   define AV_WN(s, p, v) AV_WB##s(p, v)
#else
#   define AV_RN(s, p)    AV_RL##s(p)
#   define AV_WN(s, p, v) AV_WL##s(p, v)
#endif

#endif /* HAVE_FAST_UNALIGNED */
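
/*
 * Typical usage of the byte-order readers/writers (illustrative only):
 *
 *     const uint8_t *buf = ...;            // some demuxer input
 *     uint32_t tag  = AV_RL32(buf);        // 32-bit little-endian field
 *     unsigned size = AV_RB16(buf + 4);    // 16-bit big-endian field
 *     AV_WB32(out, size);                  // store in big-endian order
 */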

#ifndef AV_RN16
#   define AV_RN16(p) AV_RN(16, p)
#endif

#ifndef AV_RN32
#   define AV_RN32(p) AV_RN(32, p)
#endif

#ifndef AV_RN64
#   define AV_RN64(p) AV_RN(64, p)
#endif

#ifndef AV_WN16
#   define AV_WN16(p, v) AV_WN(16, p, v)
#endif

#ifndef AV_WN32
#   define AV_WN32(p, v) AV_WN(32, p, v)
#endif

#ifndef AV_WN64
#   define AV_WN64(p, v) AV_WN(64, p, v)
#endif

#if AV_HAVE_BIGENDIAN
#   define AV_RB(s, p)    AV_RN##s(p)
#   define AV_WB(s, p, v) AV_WN##s(p, v)
#   define AV_RL(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
#else
#   define AV_RB(s, p)    av_bswap##s(AV_RN##s(p))
#   define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
#   define AV_RL(s, p)    AV_RN##s(p)
#   define AV_WL(s, p, v) AV_WN##s(p, v)
#endif

#define AV_RB8(x)     (((const uint8_t*)(x))[0])
#define AV_WB8(p, d)  do { ((uint8_t*)(p))[0] = (d); } while(0)

#define AV_RL8(x)     AV_RB8(x)
#define AV_WL8(p, d)  AV_WB8(p, d)

#ifndef AV_RB16
#   define AV_RB16(p)    AV_RB(16, p)
#endif
#ifndef AV_WB16
#   define AV_WB16(p, v) AV_WB(16, p, v)
#endif

#ifndef AV_RL16
#   define AV_RL16(p)    AV_RL(16, p)
#endif
#ifndef AV_WL16
#   define AV_WL16(p, v) AV_WL(16, p, v)
#endif

#ifndef AV_RB32
#   define AV_RB32(p)    AV_RB(32, p)
#endif
#ifndef AV_WB32
#   define AV_WB32(p, v) AV_WB(32, p, v)
#endif

#ifndef AV_RL32
#   define AV_RL32(p)    AV_RL(32, p)
#endif
#ifndef AV_WL32
#   define AV_WL32(p, v) AV_WL(32, p, v)
#endif

#ifndef AV_RB64
#   define AV_RB64(p)    AV_RB(64, p)
#endif
#ifndef AV_WB64
#   define AV_WB64(p, v) AV_WB(64, p, v)
#endif

#ifndef AV_RL64
#   define AV_RL64(p)    AV_RL(64, p)
#endif
#ifndef AV_WL64
#   define AV_WL64(p, v) AV_WL(64, p, v)
#endif

#ifndef AV_RB24
#   define AV_RB24(x)                   \
    ((((const uint8_t*)(x))[0] << 16) | \
     (((const uint8_t*)(x))[1] <<  8) | \
      ((const uint8_t*)(x))[2])
#endif
#ifndef AV_WB24
#   define AV_WB24(p, d) do {     \
        ((uint8_t*)(p))[2] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[0] = (d)>>16; \
    } while(0)
#endif

#ifndef AV_RL24
#   define AV_RL24(x)                   \
    ((((const uint8_t*)(x))[2] << 16) | \
     (((const uint8_t*)(x))[1] <<  8) | \
      ((const uint8_t*)(x))[0])
#endif
#ifndef AV_WL24
#   define AV_WL24(p, d) do {     \
        ((uint8_t*)(p))[0] = (d);     \
        ((uint8_t*)(p))[1] = (d)>>8;  \
        ((uint8_t*)(p))[2] = (d)>>16; \
    } while(0)
#endif
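
/*
 * The 24-bit variants handle 3-byte fields, which several container and
 * bitstream formats use. As a quick check of the byte order:
 *
 *     uint32_t v = AV_RB24(p);   // == (p[0] << 16) | (p[1] << 8) | p[2]
 */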

/*
 * The AV_[RW]NA macros access naturally aligned data
 * in a type-safe way.
 */

#define AV_RNA(s, p)    (((const av_alias##s*)(p))->u##s)
#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))

#ifndef AV_RN16A
#   define AV_RN16A(p) AV_RNA(16, p)
#endif

#ifndef AV_RN32A
#   define AV_RN32A(p) AV_RNA(32, p)
#endif

#ifndef AV_RN64A
#   define AV_RN64A(p) AV_RNA(64, p)
#endif

#ifndef AV_WN16A
#   define AV_WN16A(p, v) AV_WNA(16, p, v)
#endif

#ifndef AV_WN32A
#   define AV_WN32A(p, v) AV_WNA(32, p, v)
#endif

#ifndef AV_WN64A
#   define AV_WN64A(p, v) AV_WNA(64, p, v)
#endif
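
/*
 * Usage sketch: when a pointer is known to be naturally aligned (e.g. it
 * points into an av_malloc()ed buffer at a suitably aligned offset), the
 * *A variants let the compiler assume alignment instead of going through
 * the unaligned-access path:
 *
 *     AV_WN32A(dst, 0x01020304);    // dst assumed 4-byte aligned
 *     uint32_t v = AV_RN32A(dst);   // v == 0x01020304
 */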

/* Parameters for AV_COPY*, AV_SWAP* and AV_ZERO* must be
 * naturally aligned. They may be implemented using MMX,
 * so emms_c() must be called before any subsequent
 * floating-point code is executed.
 */
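
/*
 * Usage sketch (both pointers assumed 8-byte aligned):
 *
 *     AV_COPY64(dst, src);   // copy 8 bytes with a single aligned access
 *     AV_ZERO64(dst);        // clear the same 8 bytes
 *     // If the active implementation uses MMX, call emms_c() before any
 *     // following floating-point code.
 */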

#define AV_COPY(n, d, s) \
    (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)

#ifndef AV_COPY16
#   define AV_COPY16(d, s) AV_COPY(16, d, s)
#endif

#ifndef AV_COPY32
#   define AV_COPY32(d, s) AV_COPY(32, d, s)
#endif

#ifndef AV_COPY64
#   define AV_COPY64(d, s) AV_COPY(64, d, s)
#endif

#ifndef AV_COPY128
#   define AV_COPY128(d, s)                    \
    do {                                       \
        AV_COPY64(d, s);                       \
        AV_COPY64((char*)(d)+8, (char*)(s)+8); \
    } while(0)
#endif

#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))

#ifndef AV_SWAP64
#   define AV_SWAP64(a, b) AV_SWAP(64, a, b)
#endif

#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)

#ifndef AV_ZERO16
#   define AV_ZERO16(d) AV_ZERO(16, d)
#endif

#ifndef AV_ZERO32
#   define AV_ZERO32(d) AV_ZERO(32, d)
#endif

#ifndef AV_ZERO64
#   define AV_ZERO64(d) AV_ZERO(64, d)
#endif

#ifndef AV_ZERO128
#   define AV_ZERO128(d)         \
    do {                         \
        AV_ZERO64(d);            \
        AV_ZERO64((char*)(d)+8); \
    } while(0)
#endif

#endif /* AVUTIL_INTREADWRITE_H */