3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # This module implements support for Intel AES-NI extension. In
11 # OpenSSL context it's used with Intel engine, but can also be used as
12 # drop-in replacement for crypto/aes/asm/aes-x86_64.pl [see below for
17 # Given aes(enc|dec) instructions' latency asymptotic performance for
18 # non-parallelizable modes such as CBC encrypt is 3.75 cycles per byte
19 # processed with 128-bit key. And given their throughput asymptotic
20 # performance for parallelizable modes is 1.25 cycles per byte. Being
21 # asymptotic limit it's not something you commonly achieve in reality,
22 # but how close does one get? Below are results collected for
23 # different modes and block sizes. Pairs of numbers are for en-/
26 # 16-byte 64-byte 256-byte 1-KB 8-KB
27 # ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26
28 # CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26
29 # CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28
30 # CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07
31 # OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38
32 # CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55
34 # ECB, CTR, CBC and CCM results are free from EVP overhead. This means
35 # that otherwise used 'openssl speed -evp aes-128-??? -engine aesni
36 # [-decrypt]' will exhibit 10-15% worse results for smaller blocks.
37 # The results were collected with specially crafted speed.c benchmark
38 # in order to compare them with results reported in "Intel Advanced
39 # Encryption Standard (AES) New Instruction Set" White Paper Revision
40 # 3.0 dated May 2010. All above results are consistently better. This
41 # module also provides better performance for block sizes smaller than
42 # 128 bytes in points *not* represented in the above table.
44 # Looking at the results for 8-KB buffer.
46 # CFB and OFB results are far from the limit, because implementation
47 # uses "generic" CRYPTO_[c|o]fb128_encrypt interfaces relying on
48 # single-block aesni_encrypt, which is not the most optimal way to go.
49 # CBC encrypt result is unexpectedly high and there is no documented
50 # explanation for it. Seemingly there is a small penalty for feeding
51 # the result back to AES unit the way it's done in CBC mode. There is
52 # nothing one can do and the result appears optimal. CCM result is
53 # identical to CBC, because CBC-MAC is essentially CBC encrypt without
54 # saving output. CCM CTR "stays invisible," because it's neatly
55 # interleaved with CBC-MAC. This provides ~30% improvement over
56 # "straightforward" CCM implementation with CTR and CBC-MAC performed
57 # disjointly. Parallelizable modes practically achieve the theoretical
60 # Looking at how results vary with buffer size.
62 # Curves are practically saturated at 1-KB buffer size. In most cases
63 # "256-byte" performance is >95%, and "64-byte" is ~90% of "8-KB" one.
64 # CTR curve doesn't follow this pattern and is "slowest" changing one
65 # with "256-byte" result being 87% of "8-KB." This is because overhead
66 # in CTR mode is most computationally intensive. Small-block CCM
67 # decrypt is slower than encrypt, because first CTR and last CBC-MAC
68 # iterations can't be interleaved.
70 # Results for 192- and 256-bit keys.
72 # EVP-free results were observed to scale perfectly with number of
73 # rounds for larger block sizes, i.e. 192-bit result being 10/12 times
74 # lower and 256-bit one - 10/14. Well, in CBC encrypt case differences
75 # are a tad smaller, because the above mentioned penalty biases all
76 # results by same constant value. In similar way function call
77 # overhead affects small-block performance, as well as OFB and CFB
78 # results. Differences are not large, most common coefficients are
79 # 10/11.7 and 10/13.4 (as opposite to 10/12.0 and 10/14.0), but one
80 # can observe even 10/11.2 and 10/12.4 (CTR, OFB, CFB)...
84 # While Westmere processor features 6 cycles latency for aes[enc|dec]
85 # instructions, which can be scheduled every second cycle, Sandy
86 # Bridge spends 8 cycles per instruction, but it can schedule them
87 # every cycle. This means that code targeting Westmere would perform
88 # suboptimally on Sandy Bridge. Therefore this update.
90 # In addition, non-parallelizable CBC encrypt (as well as CCM) is
91 # optimized. Relative improvement might appear modest, 8% on Westmere,
92 # but in absolute terms it's 3.77 cycles per byte encrypted with
93 # 128-bit key on Westmere, and 5.07 - on Sandy Bridge. These numbers
94 # should be compared to asymptotic limits of 3.75 for Westmere and
95 # 5.00 for Sandy Bridge. Actually, the fact that they get this close
96 # to asymptotic limits is quite amazing. Indeed, the limit is
97 # calculated as latency times number of rounds, 10 for 128-bit key,
98 # and divided by 16, the number of bytes in block, or in other words
99 # it accounts *solely* for aesenc instructions. But there are extra
100 # instructions, and numbers so close to the asymptotic limits mean
101 # that it's as if it takes as little as *one* additional cycle to
102 # execute all of them. How is it possible? It is possible thanks to
103 # out-of-order execution logic, which manages to overlap post-
104 # processing of previous block, things like saving the output, with
105 # actual encryption of current block, as well as pre-processing of
106 # current block, things like fetching input and xor-ing it with
107 # 0-round element of the key schedule, with actual encryption of
108 # previous block. Keep this in mind...
110 # For parallelizable modes, such as ECB, CBC decrypt, CTR, higher
111 # performance is achieved by interleaving instructions working on
112 # independent blocks. In which case asymptotic limit for such modes
113 # can be obtained by dividing above mentioned numbers by AES
114 # instructions' interleave factor. Westmere can execute at most 3
115 # instructions at a time, meaning that optimal interleave factor is 3,
116 # and that's where the "magic" number of 1.25 comes from. "Optimal
117 # interleave factor" means that increase of interleave factor does
118 # not improve performance. The formula has proven to reflect reality
119 # pretty well on Westmere... Sandy Bridge on the other hand can
120 # execute up to 8 AES instructions at a time, so how does varying
121 # interleave factor affect the performance? Here is table for ECB
122 # (numbers are cycles per byte processed with 128-bit key):
124 # instruction interleave factor 3x 6x 8x
125 # theoretical asymptotic limit 1.67 0.83 0.625
126 # measured performance for 8KB block 1.05 0.86 0.84
128 # "as if" interleave factor 4.7x 5.8x 6.0x
130 # Further data for other parallelizable modes:
132 # CBC decrypt 1.16 0.93 0.74
135 # Well, given 3x column it's probably inappropriate to call the limit
136 # asymptotic, if it can be surpassed, isn't it? What happens there?
137 # Rewind to CBC paragraph for the answer. Yes, out-of-order execution
138 # magic is responsible for this. Processor overlaps not only the
139 # additional instructions with AES ones, but even AES instructions
140 # processing adjacent triplets of independent blocks. In the 6x case
141 # additional instructions still claim disproportionally small amount
142 # of additional cycles, but in 8x case number of instructions must be
143 # a tad too high for out-of-order logic to cope with, and AES unit
144 # remains underutilized... As you can see 8x interleave is hardly
145 # justifiable, so there is no need to feel bad that 32-bit aesni-x86.pl
146 # utilizes 6x interleave because of limited register bank capacity.
148 # Higher interleave factors do have negative impact on Westmere
149 # performance. While for ECB mode it's negligible ~1.5%, other
150 # parallelizables perform ~5% worse, which is outweighed by ~25%
151 # improvement on Sandy Bridge. To balance regression on Westmere
152 # CTR mode was implemented with 6x aesenc interleave factor.
156 # Add aesni_xts_[en|de]crypt. Westmere spends 1.25 cycles processing
157 # one byte out of 8KB with 128-bit key, Sandy Bridge - 0.90. Just like
158 # in CTR mode AES instruction interleave factor was chosen to be 6x.
160 ######################################################################
161 # For reference, AMD Bulldozer spends 5.77 cycles per byte processed
162 # with 128-bit key in CBC encrypt and 0.70 cycles in CBC decrypt, 0.70
163 # in ECB, 0.71 in CTR, 0.90 in XTS... This means that aes[enc|dec]
164 # instruction latency is 9 cycles and that they can be issued every
167 $PREFIX="aesni"; # if $PREFIX is set to "AES", the script
168 # generates drop-in replacement for
169 # crypto/aes/asm/aes-x86_64.pl:-)
173 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
175 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
177 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
178 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
179 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
180 die "can't locate x86_64-xlate.pl";
182 open OUT,"| \"$^X\" $xlate $flavour $output";
185 $movkey = $PREFIX eq "aesni" ? "movups" : "movups";
186 @_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order
187 ("%rdi","%rsi","%rdx","%rcx"); # Unix order
191 $rounds="%eax"; # input to and changed by aesni_[en|de]cryptN !!!
192 # this is natural Unix argument order for public $PREFIX_[ecb|cbc]_encrypt ...
196 $key="%rcx"; # input to and changed by aesni_[en|de]cryptN !!!
197 $ivp="%r8"; # cbc, ctr, ...
199 $rnds_="%r10d"; # backup copy for $rounds
200 $key_="%r11"; # backup copy for $key
202 # %xmm register layout
203 $rndkey0="%xmm0"; $rndkey1="%xmm1";
204 $inout0="%xmm2"; $inout1="%xmm3";
205 $inout2="%xmm4"; $inout3="%xmm5";
206 $inout4="%xmm6"; $inout5="%xmm7";
207 $inout6="%xmm8"; $inout7="%xmm9";
209 $in2="%xmm6"; $in1="%xmm7"; # used in CBC decrypt, CTR, ...
210 $in0="%xmm8"; $iv="%xmm9";
212 # Inline version of internal aesni_[en|de]crypt1.
214 # Why folded loop? Because aes[enc|dec] is slow enough to accommodate
215 # cycles which take care of loop variables...
217 sub aesni_generate1 {
218 my ($p,$key,$rounds,$inout,$ivec)=@_; $inout=$inout0 if (!defined($inout));
221 $movkey ($key),$rndkey0
222 $movkey 16($key),$rndkey1
224 $code.=<<___ if (defined($ivec));
229 $code.=<<___ if (!defined($ivec));
231 xorps $rndkey0,$inout
235 aes${p} $rndkey1,$inout
237 $movkey ($key),$rndkey1
239 jnz .Loop_${p}1_$sn # loop body is 16 bytes
240 aes${p}last $rndkey1,$inout
243 # void $PREFIX_[en|de]crypt (const void *inp,void *out,const AES_KEY *key);
245 { my ($inp,$out,$key) = @_4args;
248 .globl ${PREFIX}_encrypt
249 .type ${PREFIX}_encrypt,\@abi-omnipotent
252 movups ($inp),$inout0 # load input
253 mov 240($key),$rounds # key->rounds
255 &aesni_generate1("enc",$key,$rounds);
257 movups $inout0,($out) # output
259 .size ${PREFIX}_encrypt,.-${PREFIX}_encrypt
261 .globl ${PREFIX}_decrypt
262 .type ${PREFIX}_decrypt,\@abi-omnipotent
265 movups ($inp),$inout0 # load input
266 mov 240($key),$rounds # key->rounds
268 &aesni_generate1("dec",$key,$rounds);
270 movups $inout0,($out) # output
272 .size ${PREFIX}_decrypt, .-${PREFIX}_decrypt
276 # _aesni_[en|de]cryptN are private interfaces, N denotes interleave
277 # factor. Why 3x subroutine were originally used in loops? Even though
278 # aes[enc|dec] latency was originally 6, it could be scheduled only
279 # every *2nd* cycle. Thus 3x interleave was the one providing optimal
280 # utilization, i.e. when subroutine's throughput is virtually same as
281 # of non-interleaved subroutine [for number of input blocks up to 3].
282 # This is why it makes no sense to implement 2x subroutine.
283 # aes[enc|dec] latency in next processor generation is 8, but the
284 # instructions can be scheduled every cycle. Optimal interleave for
285 # new processor is therefore 8x...
286 sub aesni_generate3 {
288 # As already mentioned it takes in $key and $rounds, which are *not*
289 # preserved. $inout[0-2] is cipher/clear text...
291 .type _aesni_${dir}rypt3,\@abi-omnipotent
294 $movkey ($key),$rndkey0
296 $movkey 16($key),$rndkey1
298 xorps $rndkey0,$inout0
299 xorps $rndkey0,$inout1
300 xorps $rndkey0,$inout2
301 $movkey ($key),$rndkey0
304 aes${dir} $rndkey1,$inout0
305 aes${dir} $rndkey1,$inout1
307 aes${dir} $rndkey1,$inout2
308 $movkey 16($key),$rndkey1
309 aes${dir} $rndkey0,$inout0
310 aes${dir} $rndkey0,$inout1
312 aes${dir} $rndkey0,$inout2
313 $movkey ($key),$rndkey0
316 aes${dir} $rndkey1,$inout0
317 aes${dir} $rndkey1,$inout1
318 aes${dir} $rndkey1,$inout2
319 aes${dir}last $rndkey0,$inout0
320 aes${dir}last $rndkey0,$inout1
321 aes${dir}last $rndkey0,$inout2
323 .size _aesni_${dir}rypt3,.-_aesni_${dir}rypt3
326 # 4x interleave is implemented to improve small block performance,
327 # most notably [and naturally] 4 block by ~30%. One can argue that one
328 # should have implemented 5x as well, but improvement would be <20%,
329 # so it's not worth it...
330 sub aesni_generate4 {
332 # As already mentioned it takes in $key and $rounds, which are *not*
333 # preserved. $inout[0-3] is cipher/clear text...
335 .type _aesni_${dir}rypt4,\@abi-omnipotent
338 $movkey ($key),$rndkey0
340 $movkey 16($key),$rndkey1
342 xorps $rndkey0,$inout0
343 xorps $rndkey0,$inout1
344 xorps $rndkey0,$inout2
345 xorps $rndkey0,$inout3
346 $movkey ($key),$rndkey0
349 aes${dir} $rndkey1,$inout0
350 aes${dir} $rndkey1,$inout1
352 aes${dir} $rndkey1,$inout2
353 aes${dir} $rndkey1,$inout3
354 $movkey 16($key),$rndkey1
355 aes${dir} $rndkey0,$inout0
356 aes${dir} $rndkey0,$inout1
358 aes${dir} $rndkey0,$inout2
359 aes${dir} $rndkey0,$inout3
360 $movkey ($key),$rndkey0
363 aes${dir} $rndkey1,$inout0
364 aes${dir} $rndkey1,$inout1
365 aes${dir} $rndkey1,$inout2
366 aes${dir} $rndkey1,$inout3
367 aes${dir}last $rndkey0,$inout0
368 aes${dir}last $rndkey0,$inout1
369 aes${dir}last $rndkey0,$inout2
370 aes${dir}last $rndkey0,$inout3
372 .size _aesni_${dir}rypt4,.-_aesni_${dir}rypt4
375 sub aesni_generate6 {
377 # As already mentioned it takes in $key and $rounds, which are *not*
378 # preserved. $inout[0-5] is cipher/clear text...
380 .type _aesni_${dir}rypt6,\@abi-omnipotent
383 $movkey ($key),$rndkey0
385 $movkey 16($key),$rndkey1
387 xorps $rndkey0,$inout0
388 pxor $rndkey0,$inout1
389 aes${dir} $rndkey1,$inout0
390 pxor $rndkey0,$inout2
391 aes${dir} $rndkey1,$inout1
392 pxor $rndkey0,$inout3
393 aes${dir} $rndkey1,$inout2
394 pxor $rndkey0,$inout4
395 aes${dir} $rndkey1,$inout3
396 pxor $rndkey0,$inout5
398 aes${dir} $rndkey1,$inout4
399 $movkey ($key),$rndkey0
400 aes${dir} $rndkey1,$inout5
401 jmp .L${dir}_loop6_enter
404 aes${dir} $rndkey1,$inout0
405 aes${dir} $rndkey1,$inout1
407 aes${dir} $rndkey1,$inout2
408 aes${dir} $rndkey1,$inout3
409 aes${dir} $rndkey1,$inout4
410 aes${dir} $rndkey1,$inout5
411 .L${dir}_loop6_enter: # happens to be 16-byte aligned
412 $movkey 16($key),$rndkey1
413 aes${dir} $rndkey0,$inout0
414 aes${dir} $rndkey0,$inout1
416 aes${dir} $rndkey0,$inout2
417 aes${dir} $rndkey0,$inout3
418 aes${dir} $rndkey0,$inout4
419 aes${dir} $rndkey0,$inout5
420 $movkey ($key),$rndkey0
423 aes${dir} $rndkey1,$inout0
424 aes${dir} $rndkey1,$inout1
425 aes${dir} $rndkey1,$inout2
426 aes${dir} $rndkey1,$inout3
427 aes${dir} $rndkey1,$inout4
428 aes${dir} $rndkey1,$inout5
429 aes${dir}last $rndkey0,$inout0
430 aes${dir}last $rndkey0,$inout1
431 aes${dir}last $rndkey0,$inout2
432 aes${dir}last $rndkey0,$inout3
433 aes${dir}last $rndkey0,$inout4
434 aes${dir}last $rndkey0,$inout5
436 .size _aesni_${dir}rypt6,.-_aesni_${dir}rypt6
439 sub aesni_generate8 {
441 # As already mentioned it takes in $key and $rounds, which are *not*
442 # preserved. $inout[0-7] is cipher/clear text...
444 .type _aesni_${dir}rypt8,\@abi-omnipotent
447 $movkey ($key),$rndkey0
449 $movkey 16($key),$rndkey1
451 xorps $rndkey0,$inout0
452 xorps $rndkey0,$inout1
453 aes${dir} $rndkey1,$inout0
454 pxor $rndkey0,$inout2
455 aes${dir} $rndkey1,$inout1
456 pxor $rndkey0,$inout3
457 aes${dir} $rndkey1,$inout2
458 pxor $rndkey0,$inout4
459 aes${dir} $rndkey1,$inout3
460 pxor $rndkey0,$inout5
462 aes${dir} $rndkey1,$inout4
463 pxor $rndkey0,$inout6
464 aes${dir} $rndkey1,$inout5
465 pxor $rndkey0,$inout7
466 $movkey ($key),$rndkey0
467 aes${dir} $rndkey1,$inout6
468 aes${dir} $rndkey1,$inout7
469 $movkey 16($key),$rndkey1
470 jmp .L${dir}_loop8_enter
473 aes${dir} $rndkey1,$inout0
474 aes${dir} $rndkey1,$inout1
476 aes${dir} $rndkey1,$inout2
477 aes${dir} $rndkey1,$inout3
478 aes${dir} $rndkey1,$inout4
479 aes${dir} $rndkey1,$inout5
480 aes${dir} $rndkey1,$inout6
481 aes${dir} $rndkey1,$inout7
482 $movkey 16($key),$rndkey1
483 .L${dir}_loop8_enter: # happens to be 16-byte aligned
484 aes${dir} $rndkey0,$inout0
485 aes${dir} $rndkey0,$inout1
487 aes${dir} $rndkey0,$inout2
488 aes${dir} $rndkey0,$inout3
489 aes${dir} $rndkey0,$inout4
490 aes${dir} $rndkey0,$inout5
491 aes${dir} $rndkey0,$inout6
492 aes${dir} $rndkey0,$inout7
493 $movkey ($key),$rndkey0
496 aes${dir} $rndkey1,$inout0
497 aes${dir} $rndkey1,$inout1
498 aes${dir} $rndkey1,$inout2
499 aes${dir} $rndkey1,$inout3
500 aes${dir} $rndkey1,$inout4
501 aes${dir} $rndkey1,$inout5
502 aes${dir} $rndkey1,$inout6
503 aes${dir} $rndkey1,$inout7
504 aes${dir}last $rndkey0,$inout0
505 aes${dir}last $rndkey0,$inout1
506 aes${dir}last $rndkey0,$inout2
507 aes${dir}last $rndkey0,$inout3
508 aes${dir}last $rndkey0,$inout4
509 aes${dir}last $rndkey0,$inout5
510 aes${dir}last $rndkey0,$inout6
511 aes${dir}last $rndkey0,$inout7
513 .size _aesni_${dir}rypt8,.-_aesni_${dir}rypt8
516 &aesni_generate3("enc") if ($PREFIX eq "aesni");
517 &aesni_generate3("dec");
518 &aesni_generate4("enc") if ($PREFIX eq "aesni");
519 &aesni_generate4("dec");
520 &aesni_generate6("enc") if ($PREFIX eq "aesni");
521 &aesni_generate6("dec");
522 &aesni_generate8("enc") if ($PREFIX eq "aesni");
523 &aesni_generate8("dec");
525 if ($PREFIX eq "aesni") {
526 ########################################################################
527 # void aesni_ecb_encrypt (const void *in, void *out,
528 # size_t length, const AES_KEY *key,
531 .globl aesni_ecb_encrypt
532 .type aesni_ecb_encrypt,\@function,5
538 mov 240($key),$rounds # key->rounds
539 $movkey ($key),$rndkey0
540 mov $key,$key_ # backup $key
541 mov $rounds,$rnds_ # backup $rounds
542 test %r8d,%r8d # 5th argument
544 #--------------------------- ECB ENCRYPT ------------------------------#
548 movdqu ($inp),$inout0
549 movdqu 0x10($inp),$inout1
550 movdqu 0x20($inp),$inout2
551 movdqu 0x30($inp),$inout3
552 movdqu 0x40($inp),$inout4
553 movdqu 0x50($inp),$inout5
554 movdqu 0x60($inp),$inout6
555 movdqu 0x70($inp),$inout7
558 jmp .Lecb_enc_loop8_enter
561 movups $inout0,($out)
562 mov $key_,$key # restore $key
563 movdqu ($inp),$inout0
564 mov $rnds_,$rounds # restore $rounds
565 movups $inout1,0x10($out)
566 movdqu 0x10($inp),$inout1
567 movups $inout2,0x20($out)
568 movdqu 0x20($inp),$inout2
569 movups $inout3,0x30($out)
570 movdqu 0x30($inp),$inout3
571 movups $inout4,0x40($out)
572 movdqu 0x40($inp),$inout4
573 movups $inout5,0x50($out)
574 movdqu 0x50($inp),$inout5
575 movups $inout6,0x60($out)
576 movdqu 0x60($inp),$inout6
577 movups $inout7,0x70($out)
579 movdqu 0x70($inp),$inout7
581 .Lecb_enc_loop8_enter:
588 movups $inout0,($out)
589 mov $key_,$key # restore $key
590 movups $inout1,0x10($out)
591 mov $rnds_,$rounds # restore $rounds
592 movups $inout2,0x20($out)
593 movups $inout3,0x30($out)
594 movups $inout4,0x40($out)
595 movups $inout5,0x50($out)
596 movups $inout6,0x60($out)
597 movups $inout7,0x70($out)
603 movups ($inp),$inout0
606 movups 0x10($inp),$inout1
608 movups 0x20($inp),$inout2
611 movups 0x30($inp),$inout3
613 movups 0x40($inp),$inout4
616 movups 0x50($inp),$inout5
618 movdqu 0x60($inp),$inout6
620 movups $inout0,($out)
621 movups $inout1,0x10($out)
622 movups $inout2,0x20($out)
623 movups $inout3,0x30($out)
624 movups $inout4,0x40($out)
625 movups $inout5,0x50($out)
626 movups $inout6,0x60($out)
631 &aesni_generate1("enc",$key,$rounds);
633 movups $inout0,($out)
637 xorps $inout2,$inout2
639 movups $inout0,($out)
640 movups $inout1,0x10($out)
645 movups $inout0,($out)
646 movups $inout1,0x10($out)
647 movups $inout2,0x20($out)
652 movups $inout0,($out)
653 movups $inout1,0x10($out)
654 movups $inout2,0x20($out)
655 movups $inout3,0x30($out)
659 xorps $inout5,$inout5
661 movups $inout0,($out)
662 movups $inout1,0x10($out)
663 movups $inout2,0x20($out)
664 movups $inout3,0x30($out)
665 movups $inout4,0x40($out)
670 movups $inout0,($out)
671 movups $inout1,0x10($out)
672 movups $inout2,0x20($out)
673 movups $inout3,0x30($out)
674 movups $inout4,0x40($out)
675 movups $inout5,0x50($out)
677 \f#--------------------------- ECB DECRYPT ------------------------------#
683 movdqu ($inp),$inout0
684 movdqu 0x10($inp),$inout1
685 movdqu 0x20($inp),$inout2
686 movdqu 0x30($inp),$inout3
687 movdqu 0x40($inp),$inout4
688 movdqu 0x50($inp),$inout5
689 movdqu 0x60($inp),$inout6
690 movdqu 0x70($inp),$inout7
693 jmp .Lecb_dec_loop8_enter
696 movups $inout0,($out)
697 mov $key_,$key # restore $key
698 movdqu ($inp),$inout0
699 mov $rnds_,$rounds # restore $rounds
700 movups $inout1,0x10($out)
701 movdqu 0x10($inp),$inout1
702 movups $inout2,0x20($out)
703 movdqu 0x20($inp),$inout2
704 movups $inout3,0x30($out)
705 movdqu 0x30($inp),$inout3
706 movups $inout4,0x40($out)
707 movdqu 0x40($inp),$inout4
708 movups $inout5,0x50($out)
709 movdqu 0x50($inp),$inout5
710 movups $inout6,0x60($out)
711 movdqu 0x60($inp),$inout6
712 movups $inout7,0x70($out)
714 movdqu 0x70($inp),$inout7
716 .Lecb_dec_loop8_enter:
720 $movkey ($key_),$rndkey0
724 movups $inout0,($out)
725 mov $key_,$key # restore $key
726 movups $inout1,0x10($out)
727 mov $rnds_,$rounds # restore $rounds
728 movups $inout2,0x20($out)
729 movups $inout3,0x30($out)
730 movups $inout4,0x40($out)
731 movups $inout5,0x50($out)
732 movups $inout6,0x60($out)
733 movups $inout7,0x70($out)
739 movups ($inp),$inout0
742 movups 0x10($inp),$inout1
744 movups 0x20($inp),$inout2
747 movups 0x30($inp),$inout3
749 movups 0x40($inp),$inout4
752 movups 0x50($inp),$inout5
754 movups 0x60($inp),$inout6
755 $movkey ($key),$rndkey0
757 movups $inout0,($out)
758 movups $inout1,0x10($out)
759 movups $inout2,0x20($out)
760 movups $inout3,0x30($out)
761 movups $inout4,0x40($out)
762 movups $inout5,0x50($out)
763 movups $inout6,0x60($out)
768 &aesni_generate1("dec",$key,$rounds);
770 movups $inout0,($out)
774 xorps $inout2,$inout2
776 movups $inout0,($out)
777 movups $inout1,0x10($out)
782 movups $inout0,($out)
783 movups $inout1,0x10($out)
784 movups $inout2,0x20($out)
789 movups $inout0,($out)
790 movups $inout1,0x10($out)
791 movups $inout2,0x20($out)
792 movups $inout3,0x30($out)
796 xorps $inout5,$inout5
798 movups $inout0,($out)
799 movups $inout1,0x10($out)
800 movups $inout2,0x20($out)
801 movups $inout3,0x30($out)
802 movups $inout4,0x40($out)
807 movups $inout0,($out)
808 movups $inout1,0x10($out)
809 movups $inout2,0x20($out)
810 movups $inout3,0x30($out)
811 movups $inout4,0x40($out)
812 movups $inout5,0x50($out)
816 .size aesni_ecb_encrypt,.-aesni_ecb_encrypt
820 ######################################################################
821 # void aesni_ccm64_[en|de]crypt_blocks (const void *in, void *out,
822 # size_t blocks, const AES_KEY *key,
823 # const char *ivec,char *cmac);
825 # Handles only complete blocks, operates on 64-bit counter and
826 # does not update *ivec! Nor does it finalize CMAC value
827 # (see engine/eng_aesni.c for details)
830 my $cmac="%r9"; # 6th argument
832 my $increment="%xmm6";
833 my $bswap_mask="%xmm7";
836 .globl aesni_ccm64_encrypt_blocks
837 .type aesni_ccm64_encrypt_blocks,\@function,6
839 aesni_ccm64_encrypt_blocks:
841 $code.=<<___ if ($win64);
844 movaps %xmm7,0x10(%rsp)
845 movaps %xmm8,0x20(%rsp)
846 movaps %xmm9,0x30(%rsp)
850 mov 240($key),$rounds # key->rounds
852 movdqa .Lincrement64(%rip),$increment
853 movdqa .Lbswap_mask(%rip),$bswap_mask
857 movdqu ($cmac),$inout1
860 pshufb $bswap_mask,$iv
861 jmp .Lccm64_enc_outer
864 $movkey ($key_),$rndkey0
866 movups ($inp),$in0 # load inp
868 xorps $rndkey0,$inout0 # counter
869 $movkey 16($key_),$rndkey1
872 xorps $rndkey0,$inout1 # cmac^=inp
873 $movkey ($key),$rndkey0
876 aesenc $rndkey1,$inout0
878 aesenc $rndkey1,$inout1
879 $movkey 16($key),$rndkey1
880 aesenc $rndkey0,$inout0
882 aesenc $rndkey0,$inout1
883 $movkey 0($key),$rndkey0
884 jnz .Lccm64_enc2_loop
885 aesenc $rndkey1,$inout0
886 aesenc $rndkey1,$inout1
888 aesenclast $rndkey0,$inout0
889 aesenclast $rndkey0,$inout1
893 xorps $inout0,$in0 # inp ^= E(iv)
895 movups $in0,($out) # save output
897 pshufb $bswap_mask,$inout0
898 jnz .Lccm64_enc_outer
900 movups $inout1,($cmac)
902 $code.=<<___ if ($win64);
904 movaps 0x10(%rsp),%xmm7
905 movaps 0x20(%rsp),%xmm8
906 movaps 0x30(%rsp),%xmm9
912 .size aesni_ccm64_encrypt_blocks,.-aesni_ccm64_encrypt_blocks
914 ######################################################################
916 .globl aesni_ccm64_decrypt_blocks
917 .type aesni_ccm64_decrypt_blocks,\@function,6
919 aesni_ccm64_decrypt_blocks:
921 $code.=<<___ if ($win64);
924 movaps %xmm7,0x10(%rsp)
925 movaps %xmm8,0x20(%rsp)
926 movaps %xmm9,0x30(%rsp)
930 mov 240($key),$rounds # key->rounds
932 movdqu ($cmac),$inout1
933 movdqa .Lincrement64(%rip),$increment
934 movdqa .Lbswap_mask(%rip),$bswap_mask
939 pshufb $bswap_mask,$iv
941 &aesni_generate1("enc",$key,$rounds);
943 movups ($inp),$in0 # load inp
946 jmp .Lccm64_dec_outer
949 xorps $inout0,$in0 # inp ^= E(iv)
952 movups $in0,($out) # save output
954 pshufb $bswap_mask,$inout0
959 $movkey ($key_),$rndkey0
961 $movkey 16($key_),$rndkey1
964 xorps $rndkey0,$inout0
965 xorps $in0,$inout1 # cmac^=out
966 $movkey ($key),$rndkey0
969 aesenc $rndkey1,$inout0
971 aesenc $rndkey1,$inout1
972 $movkey 16($key),$rndkey1
973 aesenc $rndkey0,$inout0
975 aesenc $rndkey0,$inout1
976 $movkey 0($key),$rndkey0
977 jnz .Lccm64_dec2_loop
978 movups ($inp),$in0 # load inp
980 aesenc $rndkey1,$inout0
981 aesenc $rndkey1,$inout1
983 aesenclast $rndkey0,$inout0
984 aesenclast $rndkey0,$inout1
985 jmp .Lccm64_dec_outer
989 #xorps $in0,$inout1 # cmac^=out
991 &aesni_generate1("enc",$key_,$rounds,$inout1,$in0);
993 movups $inout1,($cmac)
995 $code.=<<___ if ($win64);
997 movaps 0x10(%rsp),%xmm7
998 movaps 0x20(%rsp),%xmm8
999 movaps 0x30(%rsp),%xmm9
1005 .size aesni_ccm64_decrypt_blocks,.-aesni_ccm64_decrypt_blocks
1008 ######################################################################
1009 # void aesni_ctr32_encrypt_blocks (const void *in, void *out,
1010 # size_t blocks, const AES_KEY *key,
1011 # const char *ivec);
1013 # Handles only complete blocks, operates on 32-bit counter and
1014 # does not update *ivec! (see crypto/modes/ctr128.c for details)
1016 # Overhaul based on suggestions from Shay Gueron and Vlad Krasnov,
1017 # http://rt.openssl.org/Ticket/Display.html?id=3021&user=guest&pass=guest.
1018 # Keywords are full unroll and modulo-schedule counter calculations
1019 # with zero-round key xor.
1021 my ($in0,$in1,$in2,$in3,$in4,$in5)=map("%xmm$_",(10..15));
1022 my ($key0,$ctr)=("${key_}d","${ivp}d");
1023 my $frame_size = 0x80 + ($win64?160:0);
1026 .globl aesni_ctr32_encrypt_blocks
1027 .type aesni_ctr32_encrypt_blocks,\@function,5
1029 aesni_ctr32_encrypt_blocks:
1032 sub \$$frame_size,%rsp
1033 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1035 $code.=<<___ if ($win64);
1036 movaps %xmm6,-0xa8(%rax)
1037 movaps %xmm7,-0x98(%rax)
1038 movaps %xmm8,-0x88(%rax)
1039 movaps %xmm9,-0x78(%rax)
1040 movaps %xmm10,-0x68(%rax)
1041 movaps %xmm11,-0x58(%rax)
1042 movaps %xmm12,-0x48(%rax)
1043 movaps %xmm13,-0x38(%rax)
1044 movaps %xmm14,-0x28(%rax)
1045 movaps %xmm15,-0x18(%rax)
1052 je .Lctr32_one_shortcut
1054 movdqu ($ivp),$inout0
1055 movdqu ($key),$rndkey0
1056 mov 12($ivp),$ctr # counter LSB
1057 pxor $rndkey0,$inout0
1058 mov 12($key),$key0 # 0-round key LSB
1059 movdqa $inout0,0x00(%rsp) # populate counter block
1061 movdqa $inout0,$inout1
1062 movdqa $inout0,$inout2
1063 movdqa $inout0,$inout3
1064 movdqa $inout0,0x40(%rsp)
1065 movdqa $inout0,0x50(%rsp)
1066 movdqa $inout0,0x60(%rsp)
1067 movdqa $inout0,0x70(%rsp)
1069 mov 240($key),$rounds # key->rounds
1077 pinsrd \$3,%r9d,$inout1
1079 movdqa $inout1,0x10(%rsp)
1080 pinsrd \$3,%r10d,$inout2
1083 movdqa $inout2,0x20(%rsp)
1086 pinsrd \$3,%r9d,$inout3
1088 movdqa $inout3,0x30(%rsp)
1090 mov %r10d,0x40+12(%rsp)
1095 mov %r9d,0x50+12(%rsp)
1098 mov %r10d,0x60+12(%rsp)
1101 mov %r9d,0x70+12(%rsp)
1103 $movkey 0x10($key),$rndkey1
1105 movdqa 0x40(%rsp),$inout4
1106 movdqa 0x50(%rsp),$inout5
1111 lea 0x80($key),$key # size optimization
1118 movdqa 0x60(%rsp),$inout6
1119 aesenc $rndkey1,$inout0
1121 movdqa 0x70(%rsp),$inout7
1122 aesenc $rndkey1,$inout1
1124 $movkey 0x20-0x80($key),$rndkey0
1125 aesenc $rndkey1,$inout2
1127 aesenc $rndkey1,$inout3
1128 mov %r9d,0x00+12(%rsp)
1130 aesenc $rndkey1,$inout4
1131 aesenc $rndkey1,$inout5
1132 aesenc $rndkey1,$inout6
1133 aesenc $rndkey1,$inout7
1134 $movkey 0x30-0x80($key),$rndkey1
1136 for($i=2;$i<8;$i++) {
1137 my $rndkeyx = ($i&1)?$rndkey1:$rndkey0;
1139 aesenc $rndkeyx,$inout0
1140 aesenc $rndkeyx,$inout1
1142 aesenc $rndkeyx,$inout2
1144 aesenc $rndkeyx,$inout3
1145 mov %r9d,`0x10*($i-1)`+12(%rsp)
1147 aesenc $rndkeyx,$inout4
1148 aesenc $rndkeyx,$inout5
1149 aesenc $rndkeyx,$inout6
1150 aesenc $rndkeyx,$inout7
1151 $movkey `0x20+0x10*$i`-0x80($key),$rndkeyx
1155 aesenc $rndkey0,$inout0
1156 aesenc $rndkey0,$inout1
1158 aesenc $rndkey0,$inout2
1160 aesenc $rndkey0,$inout3
1161 mov %r9d,0x70+12(%rsp)
1162 aesenc $rndkey0,$inout4
1163 aesenc $rndkey0,$inout5
1164 aesenc $rndkey0,$inout6
1165 movdqu 0x00($inp),$in0
1166 aesenc $rndkey0,$inout7
1167 $movkey 0xa0-0x80($key),$rndkey0
1172 aesenc $rndkey1,$inout0
1173 aesenc $rndkey1,$inout1
1174 aesenc $rndkey1,$inout2
1175 aesenc $rndkey1,$inout3
1176 aesenc $rndkey1,$inout4
1177 aesenc $rndkey1,$inout5
1178 aesenc $rndkey1,$inout6
1179 aesenc $rndkey1,$inout7
1180 $movkey 0xb0-0x80($key),$rndkey1
1182 aesenc $rndkey0,$inout0
1183 aesenc $rndkey0,$inout1
1184 aesenc $rndkey0,$inout2
1185 aesenc $rndkey0,$inout3
1186 aesenc $rndkey0,$inout4
1187 aesenc $rndkey0,$inout5
1188 aesenc $rndkey0,$inout6
1189 aesenc $rndkey0,$inout7
1190 $movkey 0xc0-0x80($key),$rndkey0
1193 aesenc $rndkey1,$inout0
1194 aesenc $rndkey1,$inout1
1195 aesenc $rndkey1,$inout2
1196 aesenc $rndkey1,$inout3
1197 aesenc $rndkey1,$inout4
1198 aesenc $rndkey1,$inout5
1199 aesenc $rndkey1,$inout6
1200 aesenc $rndkey1,$inout7
1201 $movkey 0xd0-0x80($key),$rndkey1
1203 aesenc $rndkey0,$inout0
1204 aesenc $rndkey0,$inout1
1205 aesenc $rndkey0,$inout2
1206 aesenc $rndkey0,$inout3
1207 aesenc $rndkey0,$inout4
1208 aesenc $rndkey0,$inout5
1209 aesenc $rndkey0,$inout6
1210 aesenc $rndkey0,$inout7
1211 $movkey 0xe0-0x80($key),$rndkey0
1214 movdqu 0x10($inp),$in1
1216 movdqu 0x20($inp),$in2
1218 movdqu 0x30($inp),$in3
1220 movdqu 0x40($inp),$in4
1222 movdqu 0x50($inp),$in5
1224 aesenc $rndkey1,$inout0
1226 aesenc $rndkey1,$inout1
1227 aesenc $rndkey1,$inout2
1228 aesenc $rndkey1,$inout3
1229 aesenc $rndkey1,$inout4
1230 aesenc $rndkey1,$inout5
1231 aesenc $rndkey1,$inout6
1232 aesenc $rndkey1,$inout7
1233 movdqu 0x60($inp),$rndkey1
1235 aesenclast $in0,$inout0
1236 pxor $rndkey0,$rndkey1
1237 movdqu 0x70($inp),$in0
1239 aesenclast $in1,$inout1
1241 movdqa 0x00(%rsp),$in1 # load next counter block
1242 aesenclast $in2,$inout2
1243 movdqa 0x10(%rsp),$in2
1244 aesenclast $in3,$inout3
1245 movdqa 0x20(%rsp),$in3
1246 aesenclast $in4,$inout4
1247 movdqa 0x30(%rsp),$in4
1248 aesenclast $in5,$inout5
1249 movdqa 0x40(%rsp),$in5
1250 aesenclast $rndkey1,$inout6
1251 movdqa 0x50(%rsp),$rndkey0
1252 aesenclast $in0,$inout7
1253 $movkey 0x10-0x80($key),$rndkey1
1255 movups $inout0,($out) # store output
1257 movups $inout1,0x10($out)
1259 movups $inout2,0x20($out)
1261 movups $inout3,0x30($out)
1263 movups $inout4,0x40($out)
1265 movups $inout5,0x50($out)
1266 movdqa $rndkey0,$inout5
1267 movups $inout6,0x60($out)
1268 movups $inout7,0x70($out)
1276 lea -0x80($key),$key
1284 movdqa 0x60(%rsp),$inout6
1285 pxor $inout7,$inout7
1287 $movkey 16($key),$rndkey0
1288 aesenc $rndkey1,$inout0
1290 aesenc $rndkey1,$inout1
1292 aesenc $rndkey1,$inout2
1294 aesenc $rndkey1,$inout3
1296 aesenc $rndkey1,$inout4
1297 movups 0x10($inp),$in1
1298 aesenc $rndkey1,$inout5
1299 movups 0x20($inp),$in2
1300 aesenc $rndkey1,$inout6
1301 $movkey 16($key),$rndkey1
1303 call .Lenc_loop8_enter
1305 movdqu 0x30($inp),$in3
1307 movdqu 0x40($inp),$in0
1309 movdqu $inout0,($out)
1311 movdqu $inout1,0x10($out)
1313 movdqu $inout2,0x20($out)
1315 movdqu $inout3,0x30($out)
1316 movdqu $inout4,0x40($out)
1320 movups 0x50($inp),$in1
1322 movups $inout5,0x50($out)
1325 movups 0x60($inp),$in2
1327 movups $inout6,0x60($out)
1332 aesenc $rndkey1,$inout0
1334 aesenc $rndkey1,$inout1
1335 aesenc $rndkey1,$inout2
1336 aesenc $rndkey1,$inout3
1337 $movkey ($key),$rndkey1
1340 aesenclast $rndkey1,$inout0
1342 aesenclast $rndkey1,$inout1
1343 movups 0x10($inp),$in1
1344 aesenclast $rndkey1,$inout2
1345 movups 0x20($inp),$in2
1346 aesenclast $rndkey1,$inout3
1347 movups 0x30($inp),$in3
1350 movups $inout0,($out)
1352 movups $inout1,0x10($out)
1354 movdqu $inout2,0x20($out)
1356 movdqu $inout3,0x30($out)
1361 aesenc $rndkey1,$inout0
1363 aesenc $rndkey1,$inout1
1364 aesenc $rndkey1,$inout2
1365 $movkey ($key),$rndkey1
1368 aesenclast $rndkey1,$inout0
1369 aesenclast $rndkey1,$inout1
1370 aesenclast $rndkey1,$inout2
1374 movups $inout0,($out)
1378 movups 0x10($inp),$in1
1380 movups $inout1,0x10($out)
1383 movups 0x20($inp),$in2
1385 movups $inout2,0x20($out)
1389 .Lctr32_one_shortcut:
1390 movups ($ivp),$inout0
1392 mov 240($key),$rounds # key->rounds
1394 &aesni_generate1("enc",$key,$rounds);
1397 movups $inout0,($out)
1403 $code.=<<___ if ($win64);
1404 movaps -0xa0(%rbp),%xmm6
1405 movaps -0x90(%rbp),%xmm7
1406 movaps -0x80(%rbp),%xmm8
1407 movaps -0x70(%rbp),%xmm9
1408 movaps -0x60(%rbp),%xmm10
1409 movaps -0x50(%rbp),%xmm11
1410 movaps -0x40(%rbp),%xmm12
1411 movaps -0x30(%rbp),%xmm13
1412 movaps -0x20(%rbp),%xmm14
1413 movaps -0x10(%rbp),%xmm15
1420 .size aesni_ctr32_encrypt_blocks,.-aesni_ctr32_encrypt_blocks
1424 ######################################################################
1425 # void aesni_xts_[en|de]crypt(const char *inp,char *out,size_t len,
1426 # const AES_KEY *key1, const AES_KEY *key2,
1427 # const unsigned char iv[16]);
1430 my @tweak=map("%xmm$_",(10..15));
1431 my ($twmask,$twres,$twtmp)=("%xmm8","%xmm9",@tweak[4]);
1432 my ($key2,$ivp,$len_)=("%r8","%r9","%r9");
1433 my $frame_size = 0x70 + ($win64?160:0);
1436 .globl aesni_xts_encrypt
1437 .type aesni_xts_encrypt,\@function,6
1442 sub \$$frame_size,%rsp
1443 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1445 $code.=<<___ if ($win64);
1446 movaps %xmm6,-0xa8(%rax)
1447 movaps %xmm7,-0x98(%rax)
1448 movaps %xmm8,-0x88(%rax)
1449 movaps %xmm9,-0x78(%rax)
1450 movaps %xmm10,-0x68(%rax)
1451 movaps %xmm11,-0x58(%rax)
1452 movaps %xmm12,-0x48(%rax)
1453 movaps %xmm13,-0x38(%rax)
1454 movaps %xmm14,-0x28(%rax)
1455 movaps %xmm15,-0x18(%rax)
1460 movups ($ivp),@tweak[5] # load clear-text tweak
1461 mov 240(%r8),$rounds # key2->rounds
1462 mov 240($key),$rnds_ # key1->rounds
1464 # generate the tweak
1465 &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
1467 $movkey ($key),$rndkey0 # zero round key
1468 mov $key,$key_ # backup $key
1469 mov $rnds_,$rounds # backup $rounds
1471 mov $len,$len_ # backup $len
1474 $movkey 16($key,$rnds_),$rndkey1 # last round key
1477 movdqa .Lxts_magic(%rip),$twmask
1478 pshufd \$0x5f,@tweak[5],$twres
1479 pxor $rndkey0,$rndkey1
1481 # alternative tweak calculation algorithm is based on suggestions
1482 # by Shay Gueron. psrad doesn't conflict with AES-NI instructions
1483 # and should help in the future...
1484 for ($i=0;$i<4;$i++) {
1486 movdqa $twres,$twtmp
1488 movdqa @tweak[5],@tweak[$i]
1489 psrad \$31,$twtmp # broadcast upper bits
1490 paddq @tweak[5],@tweak[5]
1492 pxor $rndkey0,@tweak[$i]
1493 pxor $twtmp,@tweak[5]
1497 movdqa @tweak[5],@tweak[4]
1499 paddq @tweak[5],@tweak[5]
1501 pxor $rndkey0,@tweak[4]
1502 pxor $twres,@tweak[5]
1503 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
1510 $movkey 16($key_),$rndkey1
1512 lea .Lxts_magic(%rip),%r8
1513 jmp .Lxts_enc_grandloop
1516 .Lxts_enc_grandloop:
1517 movdqu `16*0`($inp),$inout0 # load input
1518 movdqa $rndkey0,$twmask
1519 movdqu `16*1`($inp),$inout1
1520 pxor @tweak[0],$inout0
1521 movdqu `16*2`($inp),$inout2
1522 pxor @tweak[1],$inout1
1523 aesenc $rndkey1,$inout0
1524 movdqu `16*3`($inp),$inout3
1525 pxor @tweak[2],$inout2
1526 aesenc $rndkey1,$inout1
1527 movdqu `16*4`($inp),$inout4
1528 pxor @tweak[3],$inout3
1529 aesenc $rndkey1,$inout2
1530 movdqu `16*5`($inp),$inout5
1531 pxor @tweak[5],$twmask # round[0]^=tweak[5]
1532 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
1533 pxor @tweak[4],$inout4
1534 aesenc $rndkey1,$inout3
1535 $movkey 32($key_),$rndkey0
1536 lea `16*6`($inp),$inp
1537 pxor $twmask,$inout5
1539 pxor $twres,@tweak[0]
1540 aesenc $rndkey1,$inout4
1541 pxor $twres,@tweak[1]
1542 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
1543 aesenc $rndkey1,$inout5
1544 $movkey 48($key_),$rndkey1
1546 aesenc $rndkey0,$inout0
1547 pxor $twres,@tweak[2]
1548 movdqa @tweak[1],`16*1`(%rsp)
1549 aesenc $rndkey0,$inout1
1550 pxor $twres,@tweak[3]
1551 movdqa @tweak[2],`16*2`(%rsp)
1552 aesenc $rndkey0,$inout2
1553 pxor $twres,@tweak[4]
1554 aesenc $rndkey0,$inout3
1556 movdqa @tweak[4],`16*4`(%rsp)
1557 aesenc $rndkey0,$inout4
1558 movdqa $twmask,`16*5`(%rsp)
1559 aesenc $rndkey0,$inout5
1560 $movkey 64($key_),$rndkey0
1562 pshufd \$0x5f,@tweak[5],$twres
1566 aesenc $rndkey1,$inout0
1567 aesenc $rndkey1,$inout1
1568 aesenc $rndkey1,$inout2
1569 aesenc $rndkey1,$inout3
1570 aesenc $rndkey1,$inout4
1571 aesenc $rndkey1,$inout5
1572 $movkey 16($key),$rndkey1
1575 aesenc $rndkey0,$inout0
1576 aesenc $rndkey0,$inout1
1577 aesenc $rndkey0,$inout2
1578 aesenc $rndkey0,$inout3
1579 aesenc $rndkey0,$inout4
1580 aesenc $rndkey0,$inout5
1581 $movkey ($key),$rndkey0
1585 movdqa (%r8),$twmask
1586 movdqa $twres,$twtmp
1588 aesenc $rndkey1,$inout0
1589 paddq @tweak[5],@tweak[5]
1591 aesenc $rndkey1,$inout1
1593 $movkey ($key_),@tweak[0] # load round[0]
1594 aesenc $rndkey1,$inout2
1595 aesenc $rndkey1,$inout3
1596 pxor $twtmp,@tweak[5]
1597 aesenc $rndkey1,$inout4
1598 movaps @tweak[0],@tweak[1] # copy round[0]
1599 aesenc $rndkey1,$inout5
1600 $movkey 16($key),$rndkey1
1602 movdqa $twres,$twtmp
1604 aesenc $rndkey0,$inout0
1605 pxor @tweak[5],@tweak[0]
1607 aesenc $rndkey0,$inout1
1608 paddq @tweak[5],@tweak[5]
1610 aesenc $rndkey0,$inout2
1611 aesenc $rndkey0,$inout3
1612 pxor $twtmp,@tweak[5]
1613 aesenc $rndkey0,$inout4
1614 movaps @tweak[1],@tweak[2]
1615 aesenc $rndkey0,$inout5
1616 $movkey 32($key),$rndkey0
1618 movdqa $twres,$twtmp
1620 aesenc $rndkey1,$inout0
1621 pxor @tweak[5],@tweak[1]
1623 aesenc $rndkey1,$inout1
1624 paddq @tweak[5],@tweak[5]
1626 aesenc $rndkey1,$inout2
1627 movdqa @tweak[3],`16*3`(%rsp)
1628 aesenc $rndkey1,$inout3
1629 pxor $twtmp,@tweak[5]
1630 aesenc $rndkey1,$inout4
1631 movaps @tweak[2],@tweak[3]
1632 aesenc $rndkey1,$inout5
1633 $movkey 48($key),$rndkey1
1635 movdqa $twres,$twtmp
1637 aesenc $rndkey0,$inout0
1638 pxor @tweak[5],@tweak[2]
1640 aesenc $rndkey0,$inout1
1641 paddq @tweak[5],@tweak[5]
1643 aesenc $rndkey0,$inout2
1644 aesenc $rndkey0,$inout3
1645 pxor $twtmp,@tweak[5]
1646 aesenc $rndkey0,$inout4
1647 movaps @tweak[3],@tweak[4]
1648 aesenc $rndkey0,$inout5
1650 movdqa $twres,$rndkey0
1652 aesenc $rndkey1,$inout0
1653 pxor @tweak[5],@tweak[3]
1655 aesenc $rndkey1,$inout1
1656 paddq @tweak[5],@tweak[5]
1657 pand $twmask,$rndkey0
1658 aesenc $rndkey1,$inout2
1659 aesenc $rndkey1,$inout3
1660 pxor $rndkey0,@tweak[5]
1661 $movkey ($key_),$rndkey0
1662 aesenc $rndkey1,$inout4
1663 aesenc $rndkey1,$inout5
1664 $movkey 16($key_),$rndkey1
1666 pxor @tweak[5],@tweak[4]
1668 aesenclast `16*0`(%rsp),$inout0
1669 paddq @tweak[5],@tweak[5]
1671 aesenclast `16*1`(%rsp),$inout1
1672 aesenclast `16*2`(%rsp),$inout2
1673 pxor $twres,@tweak[5]
1674 aesenclast `16*3`(%rsp),$inout3
1675 aesenclast `16*4`(%rsp),$inout4
1676 aesenclast `16*5`(%rsp),$inout5
1677 mov $rnds_,$rounds # restore $rounds
1679 lea `16*6`($out),$out
1680 movups $inout0,`-16*6`($out) # write output
1681 movups $inout1,`-16*5`($out)
1682 movups $inout2,`-16*4`($out)
1683 movups $inout3,`-16*3`($out)
1684 movups $inout4,`-16*2`($out)
1685 movups $inout5,`-16*1`($out)
1687 jnc .Lxts_enc_grandloop
1689 lea 7($rounds,$rounds),$rounds # restore original value
1690 mov $key_,$key # restore $key
1691 mov $rounds,$rnds_ # backup $rounds
1694 pxor $rndkey0,@tweak[0]
1698 pxor $rndkey0,@tweak[1]
1701 pxor $rndkey0,@tweak[2]
1704 pxor $rndkey0,@tweak[3]
1707 pxor $rndkey0,@tweak[4]
1710 movdqu ($inp),$inout0
1711 movdqu 16*1($inp),$inout1
1712 movdqu 16*2($inp),$inout2
1713 pxor @tweak[0],$inout0
1714 movdqu 16*3($inp),$inout3
1715 pxor @tweak[1],$inout1
1716 movdqu 16*4($inp),$inout4
1718 pxor @tweak[2],$inout2
1719 pxor @tweak[3],$inout3
1720 pxor @tweak[4],$inout4
1722 call _aesni_encrypt6
1724 xorps @tweak[0],$inout0
1725 movdqa @tweak[5],@tweak[0]
1726 xorps @tweak[1],$inout1
1727 xorps @tweak[2],$inout2
1728 movdqu $inout0,($out)
1729 xorps @tweak[3],$inout3
1730 movdqu $inout1,16*1($out)
1731 xorps @tweak[4],$inout4
1732 movdqu $inout2,16*2($out)
1733 movdqu $inout3,16*3($out)
1734 movdqu $inout4,16*4($out)
1740 movups ($inp),$inout0
1742 xorps @tweak[0],$inout0
1744 &aesni_generate1("enc",$key,$rounds);
1746 xorps @tweak[0],$inout0
1747 movdqa @tweak[1],@tweak[0]
1748 movups $inout0,($out)
1754 movups ($inp),$inout0
1755 movups 16($inp),$inout1
1757 xorps @tweak[0],$inout0
1758 xorps @tweak[1],$inout1
1760 call _aesni_encrypt3
1762 xorps @tweak[0],$inout0
1763 movdqa @tweak[2],@tweak[0]
1764 xorps @tweak[1],$inout1
1765 movups $inout0,($out)
1766 movups $inout1,16*1($out)
1772 movups ($inp),$inout0
1773 movups 16*1($inp),$inout1
1774 movups 16*2($inp),$inout2
1776 xorps @tweak[0],$inout0
1777 xorps @tweak[1],$inout1
1778 xorps @tweak[2],$inout2
1780 call _aesni_encrypt3
1782 xorps @tweak[0],$inout0
1783 movdqa @tweak[3],@tweak[0]
1784 xorps @tweak[1],$inout1
1785 xorps @tweak[2],$inout2
1786 movups $inout0,($out)
1787 movups $inout1,16*1($out)
1788 movups $inout2,16*2($out)
1794 movups ($inp),$inout0
1795 movups 16*1($inp),$inout1
1796 movups 16*2($inp),$inout2
1797 xorps @tweak[0],$inout0
1798 movups 16*3($inp),$inout3
1800 xorps @tweak[1],$inout1
1801 xorps @tweak[2],$inout2
1802 xorps @tweak[3],$inout3
1804 call _aesni_encrypt4
1806 pxor @tweak[0],$inout0
1807 movdqa @tweak[4],@tweak[0]
1808 pxor @tweak[1],$inout1
1809 pxor @tweak[2],$inout2
1810 movdqu $inout0,($out)
1811 pxor @tweak[3],$inout3
1812 movdqu $inout1,16*1($out)
1813 movdqu $inout2,16*2($out)
1814 movdqu $inout3,16*3($out)
1825 movzb ($inp),%eax # borrow $rounds ...
1826 movzb -16($out),%ecx # ... and $key
1834 sub $len_,$out # rewind $out
1835 mov $key_,$key # restore $key
1836 mov $rnds_,$rounds # restore $rounds
1838 movups -16($out),$inout0
1839 xorps @tweak[0],$inout0
1841 &aesni_generate1("enc",$key,$rounds);
1843 xorps @tweak[0],$inout0
1844 movups $inout0,-16($out)
1848 $code.=<<___ if ($win64);
1849 movaps -0xa0(%rbp),%xmm6
1850 movaps -0x90(%rbp),%xmm7
1851 movaps -0x80(%rbp),%xmm8
1852 movaps -0x70(%rbp),%xmm9
1853 movaps -0x60(%rbp),%xmm10
1854 movaps -0x50(%rbp),%xmm11
1855 movaps -0x40(%rbp),%xmm12
1856 movaps -0x30(%rbp),%xmm13
1857 movaps -0x20(%rbp),%xmm14
1858 movaps -0x10(%rbp),%xmm15
1865 .size aesni_xts_encrypt,.-aesni_xts_encrypt
1869 .globl aesni_xts_decrypt
1870 .type aesni_xts_decrypt,\@function,6
1875 sub \$$frame_size,%rsp
1876 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
1878 $code.=<<___ if ($win64);
1879 movaps %xmm6,-0xa8(%rax)
1880 movaps %xmm7,-0x98(%rax)
1881 movaps %xmm8,-0x88(%rax)
1882 movaps %xmm9,-0x78(%rax)
1883 movaps %xmm10,-0x68(%rax)
1884 movaps %xmm11,-0x58(%rax)
1885 movaps %xmm12,-0x48(%rax)
1886 movaps %xmm13,-0x38(%rax)
1887 movaps %xmm14,-0x28(%rax)
1888 movaps %xmm15,-0x18(%rax)
1893 movups ($ivp),@tweak[5] # load clear-text tweak
1894 mov 240($key2),$rounds # key2->rounds
1895 mov 240($key),$rnds_ # key1->rounds
1897 # generate the tweak
1898 &aesni_generate1("enc",$key2,$rounds,@tweak[5]);
1900 xor %eax,%eax # if ($len%16) len-=16;
1906 $movkey ($key),$rndkey0 # zero round key
1907 mov $key,$key_ # backup $key
1908 mov $rnds_,$rounds # backup $rounds
1910 mov $len,$len_ # backup $len
1913 $movkey 16($key,$rnds_),$rndkey1 # last round key
1916 movdqa .Lxts_magic(%rip),$twmask
1917 pshufd \$0x5f,@tweak[5],$twres
1918 pxor $rndkey0,$rndkey1
1920 for ($i=0;$i<4;$i++) {
1922 movdqa $twres,$twtmp
1924 movdqa @tweak[5],@tweak[$i]
1925 psrad \$31,$twtmp # broadcast upper bits
1926 paddq @tweak[5],@tweak[5]
1928 pxor $rndkey0,@tweak[$i]
1929 pxor $twtmp,@tweak[5]
1933 movdqa @tweak[5],@tweak[4]
1935 paddq @tweak[5],@tweak[5]
1937 pxor $rndkey0,@tweak[4]
1938 pxor $twres,@tweak[5]
1939 movaps $rndkey1,0x60(%rsp) # save round[0]^round[last]
1946 $movkey 16($key_),$rndkey1
1948 lea .Lxts_magic(%rip),%r8
1949 jmp .Lxts_dec_grandloop
1952 .Lxts_dec_grandloop:
1953 movdqu `16*0`($inp),$inout0 # load input
1954 movdqa $rndkey0,$twmask
1955 movdqu `16*1`($inp),$inout1
1956 pxor @tweak[0],$inout0
1957 movdqu `16*2`($inp),$inout2
1958 pxor @tweak[1],$inout1
1959 aesdec $rndkey1,$inout0
1960 movdqu `16*3`($inp),$inout3
1961 pxor @tweak[2],$inout2
1962 aesdec $rndkey1,$inout1
1963 movdqu `16*4`($inp),$inout4
1964 pxor @tweak[3],$inout3
1965 aesdec $rndkey1,$inout2
1966 movdqu `16*5`($inp),$inout5
1967 pxor @tweak[5],$twmask # round[0]^=tweak[5]
1968 movdqa 0x60(%rsp),$twres # load round[0]^round[last]
1969 pxor @tweak[4],$inout4
1970 aesdec $rndkey1,$inout3
1971 $movkey 32($key_),$rndkey0
1972 lea `16*6`($inp),$inp
1973 pxor $twmask,$inout5
1975 pxor $twres,@tweak[0]
1976 aesdec $rndkey1,$inout4
1977 pxor $twres,@tweak[1]
1978 movdqa @tweak[0],`16*0`(%rsp) # put aside tweaks^last round key
1979 aesdec $rndkey1,$inout5
1980 $movkey 48($key_),$rndkey1
1982 aesdec $rndkey0,$inout0
1983 pxor $twres,@tweak[2]
1984 movdqa @tweak[1],`16*1`(%rsp)
1985 aesdec $rndkey0,$inout1
1986 pxor $twres,@tweak[3]
1987 movdqa @tweak[2],`16*2`(%rsp)
1988 aesdec $rndkey0,$inout2
1989 pxor $twres,@tweak[4]
1990 aesdec $rndkey0,$inout3
1992 movdqa @tweak[4],`16*4`(%rsp)
1993 aesdec $rndkey0,$inout4
1994 movdqa $twmask,`16*5`(%rsp)
1995 aesdec $rndkey0,$inout5
1996 $movkey 64($key_),$rndkey0
1998 pshufd \$0x5f,@tweak[5],$twres
2002 aesdec $rndkey1,$inout0
2003 aesdec $rndkey1,$inout1
2004 aesdec $rndkey1,$inout2
2005 aesdec $rndkey1,$inout3
2006 aesdec $rndkey1,$inout4
2007 aesdec $rndkey1,$inout5
2008 $movkey 16($key),$rndkey1
2011 aesdec $rndkey0,$inout0
2012 aesdec $rndkey0,$inout1
2013 aesdec $rndkey0,$inout2
2014 aesdec $rndkey0,$inout3
2015 aesdec $rndkey0,$inout4
2016 aesdec $rndkey0,$inout5
2017 $movkey ($key),$rndkey0
2021 movdqa (%r8),$twmask
2022 movdqa $twres,$twtmp
2024 aesdec $rndkey1,$inout0
2025 paddq @tweak[5],@tweak[5]
2027 aesdec $rndkey1,$inout1
2029 $movkey ($key_),@tweak[0] # load round[0]
2030 aesdec $rndkey1,$inout2
2031 aesdec $rndkey1,$inout3
2032 pxor $twtmp,@tweak[5]
2033 aesdec $rndkey1,$inout4
2034 movaps @tweak[0],@tweak[1] # copy round[0]
2035 aesdec $rndkey1,$inout5
2036 $movkey 16($key),$rndkey1
2038 movdqa $twres,$twtmp
2040 aesdec $rndkey0,$inout0
2041 pxor @tweak[5],@tweak[0]
2043 aesdec $rndkey0,$inout1
2044 paddq @tweak[5],@tweak[5]
2046 aesdec $rndkey0,$inout2
2047 aesdec $rndkey0,$inout3
2048 pxor $twtmp,@tweak[5]
2049 aesdec $rndkey0,$inout4
2050 movaps @tweak[1],@tweak[2]
2051 aesdec $rndkey0,$inout5
2052 $movkey 32($key),$rndkey0
2054 movdqa $twres,$twtmp
2056 aesdec $rndkey1,$inout0
2057 pxor @tweak[5],@tweak[1]
2059 aesdec $rndkey1,$inout1
2060 paddq @tweak[5],@tweak[5]
2062 aesdec $rndkey1,$inout2
2063 movdqa @tweak[3],`16*3`(%rsp)
2064 aesdec $rndkey1,$inout3
2065 pxor $twtmp,@tweak[5]
2066 aesdec $rndkey1,$inout4
2067 movaps @tweak[2],@tweak[3]
2068 aesdec $rndkey1,$inout5
2069 $movkey 48($key),$rndkey1
2071 movdqa $twres,$twtmp
2073 aesdec $rndkey0,$inout0
2074 pxor @tweak[5],@tweak[2]
2076 aesdec $rndkey0,$inout1
2077 paddq @tweak[5],@tweak[5]
2079 aesdec $rndkey0,$inout2
2080 aesdec $rndkey0,$inout3
2081 pxor $twtmp,@tweak[5]
2082 aesdec $rndkey0,$inout4
2083 movaps @tweak[3],@tweak[4]
2084 aesdec $rndkey0,$inout5
2086 movdqa $twres,$rndkey0
2088 aesdec $rndkey1,$inout0
2089 pxor @tweak[5],@tweak[3]
2091 aesdec $rndkey1,$inout1
2092 paddq @tweak[5],@tweak[5]
2093 pand $twmask,$rndkey0
2094 aesdec $rndkey1,$inout2
2095 aesdec $rndkey1,$inout3
2096 pxor $rndkey0,@tweak[5]
2097 $movkey ($key_),$rndkey0
2098 aesdec $rndkey1,$inout4
2099 aesdec $rndkey1,$inout5
2100 $movkey 16($key_),$rndkey1
2102 pxor @tweak[5],@tweak[4]
2104 aesdeclast `16*0`(%rsp),$inout0
2105 paddq @tweak[5],@tweak[5]
2107 aesdeclast `16*1`(%rsp),$inout1
2108 aesdeclast `16*2`(%rsp),$inout2
2109 pxor $twres,@tweak[5]
2110 aesdeclast `16*3`(%rsp),$inout3
2111 aesdeclast `16*4`(%rsp),$inout4
2112 aesdeclast `16*5`(%rsp),$inout5
2113 mov $rnds_,$rounds # restore $rounds
2115 lea `16*6`($out),$out
2116 movups $inout0,`-16*6`($out) # write output
2117 movups $inout1,`-16*5`($out)
2118 movups $inout2,`-16*4`($out)
2119 movups $inout3,`-16*3`($out)
2120 movups $inout4,`-16*2`($out)
2121 movups $inout5,`-16*1`($out)
2123 jnc .Lxts_dec_grandloop
2125 lea 7($rounds,$rounds),$rounds # restore original value
2126 mov $key_,$key # restore $key
2127 mov $rounds,$rnds_ # backup $rounds
2130 pxor $rndkey0,@tweak[0]
2131 pxor $rndkey0,@tweak[1]
2135 pxor $rndkey0,@tweak[2]
2138 pxor $rndkey0,@tweak[3]
2141 pxor $rndkey0,@tweak[4]
2146 movdqu ($inp),$inout0
2147 movdqu 16*1($inp),$inout1
2148 movdqu 16*2($inp),$inout2
2149 pxor @tweak[0],$inout0
2150 movdqu 16*3($inp),$inout3
2151 pxor @tweak[1],$inout1
2152 movdqu 16*4($inp),$inout4
2154 pxor @tweak[2],$inout2
2155 pxor @tweak[3],$inout3
2156 pxor @tweak[4],$inout4
2158 call _aesni_decrypt6
2160 xorps @tweak[0],$inout0
2161 xorps @tweak[1],$inout1
2162 xorps @tweak[2],$inout2
2163 movdqu $inout0,($out)
2164 xorps @tweak[3],$inout3
2165 movdqu $inout1,16*1($out)
2166 xorps @tweak[4],$inout4
2167 movdqu $inout2,16*2($out)
2169 movdqu $inout3,16*3($out)
2170 pcmpgtd @tweak[5],$twtmp
2171 movdqu $inout4,16*4($out)
2173 pshufd \$0x13,$twtmp,@tweak[1] # $twres
2177 movdqa @tweak[5],@tweak[0]
2178 paddq @tweak[5],@tweak[5] # psllq 1,$tweak
2179 pand $twmask,@tweak[1] # isolate carry and residue
2180 pxor @tweak[5],@tweak[1]
2185 movups ($inp),$inout0
2187 xorps @tweak[0],$inout0
2189 &aesni_generate1("dec",$key,$rounds);
2191 xorps @tweak[0],$inout0
2192 movdqa @tweak[1],@tweak[0]
2193 movups $inout0,($out)
2194 movdqa @tweak[2],@tweak[1]
2200 movups ($inp),$inout0
2201 movups 16($inp),$inout1
2203 xorps @tweak[0],$inout0
2204 xorps @tweak[1],$inout1
2206 call _aesni_decrypt3
2208 xorps @tweak[0],$inout0
2209 movdqa @tweak[2],@tweak[0]
2210 xorps @tweak[1],$inout1
2211 movdqa @tweak[3],@tweak[1]
2212 movups $inout0,($out)
2213 movups $inout1,16*1($out)
2219 movups ($inp),$inout0
2220 movups 16*1($inp),$inout1
2221 movups 16*2($inp),$inout2
2223 xorps @tweak[0],$inout0
2224 xorps @tweak[1],$inout1
2225 xorps @tweak[2],$inout2
2227 call _aesni_decrypt3
2229 xorps @tweak[0],$inout0
2230 movdqa @tweak[3],@tweak[0]
2231 xorps @tweak[1],$inout1
2232 movdqa @tweak[4],@tweak[1]
2233 xorps @tweak[2],$inout2
2234 movups $inout0,($out)
2235 movups $inout1,16*1($out)
2236 movups $inout2,16*2($out)
2242 movups ($inp),$inout0
2243 movups 16*1($inp),$inout1
2244 movups 16*2($inp),$inout2
2245 xorps @tweak[0],$inout0
2246 movups 16*3($inp),$inout3
2248 xorps @tweak[1],$inout1
2249 xorps @tweak[2],$inout2
2250 xorps @tweak[3],$inout3
2252 call _aesni_decrypt4
2254 pxor @tweak[0],$inout0
2255 movdqa @tweak[4],@tweak[0]
2256 pxor @tweak[1],$inout1
2257 movdqa @tweak[5],@tweak[1]
2258 pxor @tweak[2],$inout2
2259 movdqu $inout0,($out)
2260 pxor @tweak[3],$inout3
2261 movdqu $inout1,16*1($out)
2262 movdqu $inout2,16*2($out)
2263 movdqu $inout3,16*3($out)
2273 mov $key_,$key # restore $key
2274 mov $rnds_,$rounds # restore $rounds
2276 movups ($inp),$inout0
2277 xorps @tweak[1],$inout0
2279 &aesni_generate1("dec",$key,$rounds);
2281 xorps @tweak[1],$inout0
2282 movups $inout0,($out)
2285 movzb 16($inp),%eax # borrow $rounds ...
2286 movzb ($out),%ecx # ... and $key
2294 sub $len_,$out # rewind $out
2295 mov $key_,$key # restore $key
2296 mov $rnds_,$rounds # restore $rounds
2298 movups ($out),$inout0
2299 xorps @tweak[0],$inout0
2301 &aesni_generate1("dec",$key,$rounds);
2303 xorps @tweak[0],$inout0
2304 movups $inout0,($out)
2308 $code.=<<___ if ($win64);
2309 movaps -0xa0(%rbp),%xmm6
2310 movaps -0x90(%rbp),%xmm7
2311 movaps -0x80(%rbp),%xmm8
2312 movaps -0x70(%rbp),%xmm9
2313 movaps -0x60(%rbp),%xmm10
2314 movaps -0x50(%rbp),%xmm11
2315 movaps -0x40(%rbp),%xmm12
2316 movaps -0x30(%rbp),%xmm13
2317 movaps -0x20(%rbp),%xmm14
2318 movaps -0x10(%rbp),%xmm15
2325 .size aesni_xts_decrypt,.-aesni_xts_decrypt
2329 ########################################################################
2330 # void $PREFIX_cbc_encrypt (const void *inp, void *out,
2331 # size_t length, const AES_KEY *key,
2332 # unsigned char *ivp,const int enc);
2334 my $frame_size = 0x10 + ($win64?0xa0:0); # used in decrypt
2335 my ($iv,$in0,$in1,$in2,$in3,$in4)=map("%xmm$_",(10..15));
2339 .globl ${PREFIX}_cbc_encrypt
2340 .type ${PREFIX}_cbc_encrypt,\@function,6
2342 ${PREFIX}_cbc_encrypt:
2343 test $len,$len # check length
2346 mov 240($key),$rnds_ # key->rounds
2347 mov $key,$key_ # backup $key
2348 test %r9d,%r9d # 6th argument
2350 #--------------------------- CBC ENCRYPT ------------------------------#
2351 movups ($ivp),$inout0 # load iv as initial state
2359 movups ($inp),$inout1 # load input
2361 #xorps $inout1,$inout0
2363 &aesni_generate1("enc",$key,$rounds,$inout0,$inout1);
2365 mov $rnds_,$rounds # restore $rounds
2366 mov $key_,$key # restore $key
2367 movups $inout0,0($out) # store output
2373 movups $inout0,($ivp)
2377 mov $len,%rcx # zaps $key
2378 xchg $inp,$out # $inp is %rsi and $out is %rdi now
2379 .long 0x9066A4F3 # rep movsb
2380 mov \$16,%ecx # zero tail
2383 .long 0x9066AAF3 # rep stosb
2384 lea -16(%rdi),%rdi # rewind $out by 1 block
2385 mov $rnds_,$rounds # restore $rounds
2386 mov %rdi,%rsi # $inp and $out are the same
2387 mov $key_,$key # restore $key
2388 xor $len,$len # len=16
2389 jmp .Lcbc_enc_loop # one more spin
2390 \f#--------------------------- CBC DECRYPT ------------------------------#
2395 sub \$$frame_size,%rsp
2396 and \$-16,%rsp # Linux kernel stack can be incorrectly seeded
2398 $code.=<<___ if ($win64);
2399 movaps %xmm6,0x10(%rsp)
2400 movaps %xmm7,0x20(%rsp)
2401 movaps %xmm8,0x30(%rsp)
2402 movaps %xmm9,0x40(%rsp)
2403 movaps %xmm10,0x50(%rsp)
2404 movaps %xmm11,0x60(%rsp)
2405 movaps %xmm12,0x70(%rsp)
2406 movaps %xmm13,0x80(%rsp)
2407 movaps %xmm14,0x90(%rsp)
2408 movaps %xmm15,0xa0(%rsp)
2418 $movkey ($key),$rndkey0
2419 movdqu 0x00($inp),$inout0 # load input
2420 movdqu 0x10($inp),$inout1
2422 movdqu 0x20($inp),$inout2
2424 movdqu 0x30($inp),$inout3
2426 movdqu 0x40($inp),$inout4
2428 movdqu 0x50($inp),$inout5
2431 jbe .Lcbc_dec_six_or_seven
2434 lea 0x70($key),$key # size optimization
2435 jmp .Lcbc_dec_loop8_enter
2438 movups $inout7,($out)
2440 .Lcbc_dec_loop8_enter:
2441 movdqu 0x60($inp),$inout6
2442 pxor $rndkey0,$inout0
2443 movdqu 0x70($inp),$inout7
2444 pxor $rndkey0,$inout1
2445 $movkey 0x10-0x70($key),$rndkey1
2446 pxor $rndkey0,$inout2
2448 cmp \$0x70,$len # is there at least 0x60 bytes ahead?
2449 pxor $rndkey0,$inout3
2450 pxor $rndkey0,$inout4
2451 pxor $rndkey0,$inout5
2452 pxor $rndkey0,$inout6
2454 aesdec $rndkey1,$inout0
2455 pxor $rndkey0,$inout7
2456 $movkey 0x20-0x70($key),$rndkey0
2457 aesdec $rndkey1,$inout1
2458 aesdec $rndkey1,$inout2
2459 aesdec $rndkey1,$inout3
2460 aesdec $rndkey1,$inout4
2461 aesdec $rndkey1,$inout5
2463 aesdec $rndkey1,$inout6
2465 aesdec $rndkey1,$inout7
2467 $movkey 0x30-0x70($key),$rndkey1
2469 for($i=1;$i<12;$i++) {
2470 my $rndkeyx = ($i&1)?$rndkey0:$rndkey1;
2472 aesdec $rndkeyx,$inout0
2473 aesdec $rndkeyx,$inout1
2474 aesdec $rndkeyx,$inout2
2475 aesdec $rndkeyx,$inout3
2476 aesdec $rndkeyx,$inout4
2477 aesdec $rndkeyx,$inout5
2478 aesdec $rndkeyx,$inout6
2479 aesdec $rndkeyx,$inout7
2480 $movkey `0x30+0x10*$i`-0x70($key),$rndkeyx
2482 $code.=<<___ if ($i==7);
2486 $code.=<<___ if ($i==9);
2492 aesdec $rndkey1,$inout0
2494 aesdec $rndkey1,$inout1
2496 aesdec $rndkey1,$inout2
2498 aesdec $rndkey1,$inout3
2500 aesdec $rndkey1,$inout4
2502 aesdec $rndkey1,$inout5
2504 aesdec $rndkey1,$inout6
2505 aesdec $rndkey1,$inout7
2506 movdqu 0x50($inp),$rndkey1
2508 aesdeclast $iv,$inout0
2509 movdqu 0x60($inp),$iv # borrow $iv
2510 pxor $rndkey0,$rndkey1
2511 aesdeclast $in0,$inout1
2513 movdqu 0x70($inp),$rndkey0 # next IV
2515 aesdeclast $in1,$inout2
2516 movdqu 0x00($inp_),$in0
2517 aesdeclast $in2,$inout3
2518 movdqu 0x10($inp_),$in1
2519 aesdeclast $in3,$inout4
2520 movdqu 0x20($inp_),$in2
2521 aesdeclast $in4,$inout5
2522 movdqu 0x30($inp_),$in3
2523 aesdeclast $rndkey1,$inout6
2524 movdqu 0x40($inp_),$in4
2525 aesdeclast $iv,$inout7
2526 movdqa $rndkey0,$iv # return $iv
2527 movdqu 0x50($inp_),$rndkey1
2528 $movkey -0x70($key),$rndkey0
2530 movups $inout0,($out) # store output
2532 movups $inout1,0x10($out)
2534 movups $inout2,0x20($out)
2536 movups $inout3,0x30($out)
2538 movups $inout4,0x40($out)
2540 movups $inout5,0x50($out)
2541 movdqa $rndkey1,$inout5
2542 movups $inout6,0x60($out)
2548 movaps $inout7,$inout0
2549 lea -0x70($key),$key
2551 jle .Lcbc_dec_tail_collected
2552 movups $inout7,($out)
2558 .Lcbc_dec_six_or_seven:
2562 movaps $inout5,$inout6
2563 call _aesni_decrypt6
2564 pxor $iv,$inout0 # ^= IV
2567 movdqu $inout0,($out)
2569 movdqu $inout1,0x10($out)
2571 movdqu $inout2,0x20($out)
2573 movdqu $inout3,0x30($out)
2575 movdqu $inout4,0x40($out)
2577 movdqa $inout5,$inout0
2578 jmp .Lcbc_dec_tail_collected
2582 movups 0x60($inp),$inout6
2583 xorps $inout7,$inout7
2584 call _aesni_decrypt8
2585 movups 0x50($inp),$inout7
2586 pxor $iv,$inout0 # ^= IV
2587 movups 0x60($inp),$iv
2589 movdqu $inout0,($out)
2591 movdqu $inout1,0x10($out)
2593 movdqu $inout2,0x20($out)
2595 movdqu $inout3,0x30($out)
2597 movdqu $inout4,0x40($out)
2598 pxor $inout7,$inout6
2599 movdqu $inout5,0x50($out)
2601 movdqa $inout6,$inout0
2602 jmp .Lcbc_dec_tail_collected
2605 movups ($inp),$inout0
2609 movups 0x10($inp),$inout1
2614 movups 0x20($inp),$inout2
2619 movups 0x30($inp),$inout3
2624 movups 0x40($inp),$inout4
2627 xorps $inout5,$inout5
2628 call _aesni_decrypt6
2632 movdqu $inout0,($out)
2634 movdqu $inout1,0x10($out)
2636 movdqu $inout2,0x20($out)
2638 movdqu $inout3,0x30($out)
2640 movdqa $inout4,$inout0
2642 jmp .Lcbc_dec_tail_collected
2648 &aesni_generate1("dec",$key,$rounds);
2652 jmp .Lcbc_dec_tail_collected
2656 xorps $inout2,$inout2
2657 call _aesni_decrypt3
2661 movdqu $inout0,($out)
2662 movdqa $inout1,$inout0
2664 jmp .Lcbc_dec_tail_collected
2668 call _aesni_decrypt3
2672 movdqu $inout0,($out)
2674 movdqu $inout1,0x10($out)
2675 movdqa $inout2,$inout0
2677 jmp .Lcbc_dec_tail_collected
2681 call _aesni_decrypt4
2685 movdqu $inout0,($out)
2687 movdqu $inout1,0x10($out)
2689 movdqu $inout2,0x20($out)
2690 movdqa $inout3,$inout0
2692 jmp .Lcbc_dec_tail_collected
2695 .Lcbc_dec_tail_collected:
2698 jnz .Lcbc_dec_tail_partial
2699 movups $inout0,($out)
2702 .Lcbc_dec_tail_partial:
2703 movaps $inout0,(%rsp)
2708 .long 0x9066A4F3 # rep movsb
2712 $code.=<<___ if ($win64);
2713 movaps 0x10(%rsp),%xmm6
2714 movaps 0x20(%rsp),%xmm7
2715 movaps 0x30(%rsp),%xmm8
2716 movaps 0x40(%rsp),%xmm9
2717 movaps 0x50(%rsp),%xmm10
2718 movaps 0x60(%rsp),%xmm11
2719 movaps 0x70(%rsp),%xmm12
2720 movaps 0x80(%rsp),%xmm13
2721 movaps 0x90(%rsp),%xmm14
2722 movaps 0xa0(%rsp),%xmm15
2729 .size ${PREFIX}_cbc_encrypt,.-${PREFIX}_cbc_encrypt
2732 # int $PREFIX_set_[en|de]crypt_key (const unsigned char *userKey,
2733 # int bits, AES_KEY *key)
2734 { my ($inp,$bits,$key) = @_4args;
2738 .globl ${PREFIX}_set_decrypt_key
2739 .type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
2741 ${PREFIX}_set_decrypt_key:
2742 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
2743 call __aesni_set_encrypt_key
2744 shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
2747 lea 16($key,$bits),$inp # points at the end of key schedule
2749 $movkey ($key),%xmm0 # just swap
2750 $movkey ($inp),%xmm1
2751 $movkey %xmm0,($inp)
2752 $movkey %xmm1,($key)
2757 $movkey ($key),%xmm0 # swap and inverse
2758 $movkey ($inp),%xmm1
2763 $movkey %xmm0,16($inp)
2764 $movkey %xmm1,-16($key)
2766 ja .Ldec_key_inverse
2768 $movkey ($key),%xmm0 # inverse middle
2770 $movkey %xmm0,($inp)
2774 .LSEH_end_set_decrypt_key:
2775 .size ${PREFIX}_set_decrypt_key,.-${PREFIX}_set_decrypt_key
2778 # This is based on submission by
2780 # Huang Ying <ying.huang@intel.com>
2781 # Vinodh Gopal <vinodh.gopal@intel.com>
2784 # Aggressively optimized with respect to aeskeygenassist's critical path
2785 # and is contained in %xmm0-5 to meet Win64 ABI requirement.
2788 .globl ${PREFIX}_set_encrypt_key
2789 .type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
2791 ${PREFIX}_set_encrypt_key:
2792 __aesni_set_encrypt_key:
2793 .byte 0x48,0x83,0xEC,0x08 # sub rsp,8
2800 movups ($inp),%xmm0 # pull first 128 bits of *userKey
2801 xorps %xmm4,%xmm4 # low dword of xmm4 is assumed 0
2811 mov \$9,$bits # 10 rounds for 128-bit key
2812 $movkey %xmm0,($key) # round 0
2813 aeskeygenassist \$0x1,%xmm0,%xmm1 # round 1
2814 call .Lkey_expansion_128_cold
2815 aeskeygenassist \$0x2,%xmm0,%xmm1 # round 2