Hi Stefan,

> +uint32_t tpm_get_maximum_cmd_size(void)
> +{
> +   uint32_t rc;
> +   uint32_t return_code;
> +   struct tpm_rsp_getcap_buffersize buffersize;
> +   uint32_t result;
> +
> +   if (!has_working_tpm())
> +      return 0;
> +
> +   rc = build_and_send_cmd(TPM_ORD_GET_CAPABILITY,
> +            get_capability_buffer_size,
> +            sizeof(get_capability_buffer_size),
> +            (uint8_t *)&buffersize, sizeof(buffersize),
> +            &return_code, TPM_DURATION_TYPE_SHORT);
> +
> +   if (rc || return_code)
> +      goto err_exit;
> +
> +   result = MIN(cpu_to_be32(buffersize.buffersize),

Should this be using be32_to_cpu() instead? I thought we were getting buffersize back in network byte order, so the conversion here goes the wrong way. (A sketch follows below the quoted function.)

> +                spapr_vtpm_get_buffersize());
> +
> +   return result;
> +
> +err_exit:
> +   dprintf("TPM malfunctioning (line %d).\n", __LINE__);
> +
> +   tpm_set_failure();
> +
> +   return 0;
> +}
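For reference, a minimal (untested) sketch of what I have in mind, reusing the field and helper names from the quoted patch and assuming the TPM returns the getcap payload big-endian:

   /* The response field arrives in network byte order (big-endian),
    * so convert it *to* host order before comparing.  On a
    * big-endian host both helpers are no-ops, so the current code
    * would happen to work there, but be32_to_cpu() states the
    * intent and stays correct on little-endian builds as well.
    */
   result = MIN(be32_to_cpu(buffersize.buffersize),
                spapr_vtpm_get_buffersize());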
Regards,
Vicky

Hon Ching (Vicky) Lo
Linux Security Development
Notes:  lo1@us.ibm.com
Office:  845-435-8946 (T/L: 295-8946)