Sync with upstream repo.

Changes include:
* CPU check has been broken up into a number of small libraries
* BoringSSL option has been removed
* Better abseil integration
This commit is contained in:
Kevin Boyd
2020-11-09 13:03:39 -08:00
parent e71781fd7a
commit 83eed0a886
21 changed files with 2298 additions and 1551 deletions

View File

@@ -26,28 +26,6 @@ if (NOT CMAKE_BUILD_TYPE)
endif(NOT CMAKE_BUILD_TYPE)
# Begin Google local change
# We use an in tree copy of boringSSL by default.
option(USE_BORINGSSL "build with boringSSL" OFF)
option(IN_GOOGLE3 "building in google3" OFF)
# The vendors subdirectories may not be present.
find_path(
VENDORS_AMD_PATH
NAMES amd.cc
PATHS ${CMAKE_CURRENT_SOURCE_DIR}/vendors/amd
NO_DEFAULT_PATH
)
find_path(
VENDORS_INTEL_PATH
NAMES intel.cc
PATHS ${CMAKE_CURRENT_SOURCE_DIR}/vendors/intel
NO_DEFAULT_PATH
)
# End Google local change
# Config header
configure_file (
@@ -78,24 +56,20 @@ set(CMAKE_CXX_EXTENSIONS OFF) # we want c++17 not gnu++17
add_executable(cpu_check cpu_check.cc)
add_executable(crc32c_test crc32c_test.cc)
# Third party library - available as git submodule
add_library(farmhash third_party/farmhash/src/farmhash.cc)
add_library(avx avx.cc)
add_library(compressor compressor.cc)
add_library(crc32c crc32c.c)
add_library(crypto crypto.cc)
add_library(fvt_controller fvt_controller.cc)
add_library(hasher hasher.cc)
add_library(malign_buffer malign_buffer.cc)
add_library(pattern_generator pattern_generator.cc)
add_library(silkscreen silkscreen.cc)
add_library(utils utils.cc)
# Begin Google local change
if (VENDORS_AMD_PATH)
add_library(amd ${VENDORS_AMD_PATH}/amd.cc)
add_library(hsmp ${VENDORS_AMD_PATH}/hsmp.cc)
target_link_libraries(amd hsmp pci)
set(VENDORS_LIBS ${VENDORS_LIBS} amd)
endif(VENDORS_AMD_PATH)
if (VENDORS_INTEL_PATH)
add_library(intel ${VENDORS_INTEL_PATH}/intel.cc)
set(VENDORS_LIBS ${VENDORS_LIBS} intel)
endif(VENDORS_INTEL_PATH)
# End Google local change
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-march=sandybridge" ARCH_SANDYBRIDGE)
@@ -104,16 +78,20 @@ if(ARCH_SANDYBRIDGE)
target_compile_options(crc32c PUBLIC -march=sandybridge)
endif(ARCH_SANDYBRIDGE)
target_link_libraries(cpu_check crc32c farmhash)
# Begin Google local change
target_link_libraries(cpu_check fvt_controller ${VENDORS_LIBS} utils)
# End Google local change
target_link_libraries(crc32c_test crc32c)
if (BUILD_STATIC)
set(CMAKE_FIND_LIBRARY_SUFFIXES ".a")
endif(BUILD_STATIC)
# Needs abseil
find_package(absl REQUIRED)
target_link_libraries(compressor absl::status absl::strings)
target_link_libraries(crypto absl::status absl::strings)
target_link_libraries(fvt_controller absl::strings)
target_link_libraries(malign_buffer absl::strings)
target_link_libraries(silkscreen absl::status absl::strings)
target_link_libraries(utils absl::strings)
target_link_libraries(cpu_check absl::failure_signal_handler absl::statusor absl::strings absl::symbolize)
# Needs pthreads
find_package(Threads REQUIRED)
target_link_libraries(cpu_check Threads::Threads)
@@ -125,22 +103,17 @@ if(ZLIB_INCLUDE_DIRS)
endif(ZLIB_INCLUDE_DIRS)
if(ZLIB_LIBRARIES)
target_link_libraries(cpu_check ${ZLIB_LIBRARIES})
target_link_libraries(compressor ${ZLIB_LIBRARIES})
target_link_libraries(hasher ${ZLIB_LIBRARIES})
endif(ZLIB_LIBRARIES)
# Begin Google local change
if(USE_BORINGSSL)
set(BORINGSSL_PATH ${CMAKE_CURRENT_SOURCE_DIR}/boringssl)
add_subdirectory(${BORINGSSL_PATH})
set(BORINGSSL_INCLUDE_DIRS ${BORINGSSL_PATH}/include)
include_directories("${BORINGSSL_PATH}/include")
target_link_libraries(cpu_check ssl crypto)
else(USE_BORINGSSL)
# End Google local change
# Needs OpenSSL
find_package (OpenSSL REQUIRED)
include_directories(${OPENSSL_INCLUDE_DIRS})
target_link_libraries(cpu_check ${OPENSSL_LIBRARIES})
target_link_libraries(crypto ${OPENSSL_LIBRARIES})
target_link_libraries(hasher ${OPENSSL_LIBRARIES})
# Static linking of OpenSSL may require -ldl, link it if found.
find_library (dl dl)
@@ -148,8 +121,18 @@ if(dl)
target_link_libraries(cpu_check dl)
endif(dl)
# Begin Google local change
endif(USE_BORINGSSL)
# End Google local change
# link malign_buffer first as it has a lot of dependencies.
target_link_libraries(malign_buffer utils)
target_link_libraries(crc32c_test crc32c)
target_link_libraries(compressor malign_buffer)
target_link_libraries(crypto malign_buffer)
target_link_libraries(hasher crc32c farmhash malign_buffer utils)
target_link_libraries(pattern_generator malign_buffer)
target_link_libraries(silkscreen utils)
target_link_libraries(cpu_check avx compressor crc32c crypto fvt_controller hasher malign_buffer pattern_generator silkscreen utils)
install (TARGETS cpu_check DESTINATION bin)

View File

@@ -22,7 +22,11 @@ Designed to run under Unix/Linux OS.
* cmake: https://cmake.org/
* zlib
* OpenSSL/BoringSSL
* OpenSSL
* Abseil-cpp: https://github.com/abseil/abseil-cpp
Note that Abseil must be built with the C++17 standard and include the
StatusOr package (release 2020_09_23 or later).
## Building

194
avx.cc Normal file
View File

@@ -0,0 +1,194 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "avx.h"
#if defined(__i386__) || defined(__x86_64__)
#include <immintrin.h>
#endif
#if defined(__i386__) || defined(__x86_64__)
#define X86_TARGET_ATTRIBUTE(s) __attribute__((target(s)))
#else
#define X86_TARGET_ATTRIBUTE(s)
#endif
#if defined(__i386__) || defined(__x86_64__)
// Runtime CPU feature probes via the compiler builtins.
// __builtin_cpu_init() is idempotent, so calling it before each query is
// harmless.
bool Avx::can_do_avx() {
  __builtin_cpu_init();
  return __builtin_cpu_supports("avx");
}
bool Avx::can_do_avx512f() {
  __builtin_cpu_init();
  return __builtin_cpu_supports("avx512f");
}
bool Avx::can_do_fma() {
  __builtin_cpu_init();
  return __builtin_cpu_supports("fma");
}
#else
// Non-x86 builds: these vector extensions do not exist.
bool Avx::can_do_avx() { return false; }
bool Avx::can_do_avx512f() { return false; }
bool Avx::can_do_fma() { return false; }
#endif
// Randomly decides (50/50) whether to run an AVX-heavy burst this round.
// Sets level_ to 0 (idle), 3 (256-bit AVX) or 5 (AVX512), then runs the
// corresponding burn. Returns a syndrome string on computational error,
// empty string otherwise.
std::string Avx::MaybeGoHot() {
  const bool stay_cool = std::uniform_int_distribution<int>(0, 1)(rng_) != 0;
  if (stay_cool) {
    // Don't provoke.
    level_ = 0;
    return "";
  }
  if (!can_do_avx512f()) {
    // Processor supports only AVX.
    level_ = 3;
  } else {
    // Processor supports both AVX and AVX512; pick one at random.
    level_ = (std::uniform_int_distribution<int>(0, 1)(rng_) != 0) ? 3 : 5;
  }
  return BurnIfAvxHeavy();
}
// Dispatches to the burn routine matching the current level_.
// Returns a syndrome string on computational error, empty string otherwise.
std::string Avx::BurnIfAvxHeavy() {
  switch (level_) {
    case 3:
      // Prefer the FMA variant when the CPU supports fused multiply-add.
      return can_do_fma() ? Avx256FMA(kIterations) : Avx256(kIterations);
    case 5:
      return Avx512(kIterations);
    default:
      return "";
  }
}
// See notes for Avx512 below
// Iterates the logistic map f(x) = 4x(1-x) in four 256-bit vectors. Each
// vector is seeded with all four lanes equal (_mm256_set1_pd), so every
// lane within a vector must stay equal; a lane mismatch is reported as a
// syndrome.
X86_TARGET_ATTRIBUTE("avx")
std::string Avx::Avx256(int rounds) {
#if (defined(__i386__) || defined(__x86_64__))
  const __m256d minus_four = _mm256_set1_pd(-4.0);
  __m256d x[4];
  for (int k = 0; k < 4; k++) {
    x[k] =
        _mm256_set1_pd(std::uniform_real_distribution<double>(0.0, 1.0)(rng_));
  }
  // Scalar aliases of the vectors for lane-by-lane comparison below.
  double *gross_x[4] = {
      reinterpret_cast<double *>(&x[0]),
      reinterpret_cast<double *>(&x[1]),
      reinterpret_cast<double *>(&x[2]),
      reinterpret_cast<double *>(&x[3]),
  };
  for (int i = 0; i < rounds; i++) {
    __m256d a[4];
    // a = x*x - x, then x = -4*a, i.e. x' = 4x(1-x).
    a[0] = _mm256_sub_pd(_mm256_mul_pd(x[0], x[0]), x[0]);
    a[1] = _mm256_sub_pd(_mm256_mul_pd(x[1], x[1]), x[1]);
    a[2] = _mm256_sub_pd(_mm256_mul_pd(x[2], x[2]), x[2]);
    a[3] = _mm256_sub_pd(_mm256_mul_pd(x[3], x[3]), x[3]);
    x[0] = _mm256_mul_pd(minus_four, a[0]);
    x[1] = _mm256_mul_pd(minus_four, a[1]);
    x[2] = _mm256_mul_pd(minus_four, a[2]);
    x[3] = _mm256_mul_pd(minus_four, a[3]);
  }
  // NOTE(review): vectors 1..3 are checked; vector 0 is skipped -- confirm
  // whether that is intentional.
  for (int k = 1; k < 4; k++) {
    for (int i = 0; i < 4; i++) {
      if (gross_x[k][i] != gross_x[k][0]) {
        return "avx256 pd";
      }
    }
  }
#endif
  return "";
}
// See notes for Avx512 below
// Same computation as Avx256 but using fused multiply-subtract
// (a = x*x - x in one instruction), exercising the FMA units.
X86_TARGET_ATTRIBUTE("avx,fma")
std::string Avx::Avx256FMA(int rounds) {
#if (defined(__i386__) || defined(__x86_64__))
  const __m256d minus_four = _mm256_set1_pd(-4.0);
  __m256d x[4];
  for (int k = 0; k < 4; k++) {
    x[k] =
        _mm256_set1_pd(std::uniform_real_distribution<double>(0.0, 1.0)(rng_));
  }
  // Scalar aliases of the vectors for lane-by-lane comparison below.
  double *gross_x[4] = {
      reinterpret_cast<double *>(&x[0]),
      reinterpret_cast<double *>(&x[1]),
      reinterpret_cast<double *>(&x[2]),
      reinterpret_cast<double *>(&x[3]),
  };
  for (int i = 0; i < rounds; i++) {
    __m256d a[4];
    a[0] = _mm256_fmsub_pd(x[0], x[0], x[0]);
    a[1] = _mm256_fmsub_pd(x[1], x[1], x[1]);
    a[2] = _mm256_fmsub_pd(x[2], x[2], x[2]);
    a[3] = _mm256_fmsub_pd(x[3], x[3], x[3]);
    x[0] = _mm256_mul_pd(minus_four, a[0]);
    x[1] = _mm256_mul_pd(minus_four, a[1]);
    x[2] = _mm256_mul_pd(minus_four, a[2]);
    x[3] = _mm256_mul_pd(minus_four, a[3]);
  }
  // NOTE(review): vectors 1..3 are checked; vector 0 is skipped -- confirm
  // whether that is intentional.
  for (int k = 1; k < 4; k++) {
    for (int i = 0; i < 4; i++) {
      if (gross_x[k][i] != gross_x[k][0]) {
        return "avx256 pd";
      }
    }
  }
#endif
  return "";
}
// Interleave AVX512 parallel calculation of iterates of f(x) = 4x(1-x).
// Hope compiler too dumb to see through this.
// Each 512-bit vector is seeded with all 8 double lanes equal
// (_mm512_set1_pd), so the lanes must remain equal after any number of
// iterations; a mismatch is reported as a syndrome.
X86_TARGET_ATTRIBUTE("avx512f")
std::string Avx::Avx512(int rounds) {
#if (defined(__i386__) || defined(__x86_64__))
  const __m512d minus_four = _mm512_set1_pd(-4.0);
  __m512d x[4];
  for (int k = 0; k < 4; k++) {
    x[k] =
        _mm512_set1_pd(std::uniform_real_distribution<double>(0.0, 1.0)(rng_));
  }
  // Scalar aliases of the vectors for lane-by-lane comparison below.
  double *gross_x[4] = {
      reinterpret_cast<double *>(&x[0]),
      reinterpret_cast<double *>(&x[1]),
      reinterpret_cast<double *>(&x[2]),
      reinterpret_cast<double *>(&x[3]),
  };
  for (int i = 0; i < rounds; i++) {
    __m512d a[4];
    a[0] = _mm512_fmsub_pd(x[0], x[0], x[0]);
    a[1] = _mm512_fmsub_pd(x[1], x[1], x[1]);
    a[2] = _mm512_fmsub_pd(x[2], x[2], x[2]);
    a[3] = _mm512_fmsub_pd(x[3], x[3], x[3]);
    x[0] = _mm512_mul_pd(minus_four, a[0]);
    x[1] = _mm512_mul_pd(minus_four, a[1]);
    x[2] = _mm512_mul_pd(minus_four, a[2]);
    x[3] = _mm512_mul_pd(minus_four, a[3]);
  }
  // NOTE(review): vectors 1..3 are checked; vector 0 is skipped, matching
  // Avx256/Avx256FMA -- confirm whether that is intentional.
  for (int k = 1; k < 4; k++) {
    // Bug fix: a __m512d holds 8 doubles; the previous bound (i < 7)
    // silently skipped verification of the last lane.
    for (int i = 0; i < 8; i++) {
      if (gross_x[k][i] != gross_x[k][0]) {
        return "avx512 pd";
      }
    }
  }
#endif
  return "";
}

55
avx.h Normal file
View File

@@ -0,0 +1,55 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <random>
#include <string>
#ifndef THIRD_PARTY_CPU_CHECK_AVX_H_
#define THIRD_PARTY_CPU_CHECK_AVX_H_
// x86 AVX usage has complicated core power effects. This code tries
// to provoke some power transitions that don't otherwise happen.
// While it's at it, it lightly checks results, but that's not the central
// goal. ToDo: maybe toughen the correctness checking.
//
// The power policies are governed by a number of opaque parameters; this code
// is based on a lot of guesses.
//
// Not thread safe.
class Avx {
 public:
  // Runtime CPU feature probes; always false on non-x86 builds.
  static bool can_do_avx();
  static bool can_do_avx512f();
  static bool can_do_fma();
  Avx() {}
  // Activate AVX depending on throw of the dice.
  // Returns syndrome if computational error detected, empty string otherwise.
  std::string MaybeGoHot();
  // Does a bit of computing if in a "hot" mode.
  // Returns syndrome if computational error detected, empty string otherwise.
  std::string BurnIfAvxHeavy();
 private:
  // Logistic-map iterations per burn.
  constexpr static int kIterations = 5000;
  std::string Avx256(int rounds);
  std::string Avx256FMA(int rounds);
  std::string Avx512(int rounds);
  // 0 = inactive, 3 = 256-bit AVX, 5 = AVX512 (set by MaybeGoHot).
  int level_ = 0;
  std::knuth_b rng_;
};
#endif // THIRD_PARTY_CPU_CHECK_AVX_H_

56
compressor.cc Normal file
View File

@@ -0,0 +1,56 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "compressor.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include <zlib.h>
namespace cpu_check {
// Compresses `m` into `compressed` with zlib at Z_BEST_SPEED.
// Returns OkStatus on success; kInternal with the zlib error code otherwise.
absl::Status Zlib::Compress(const MalignBuffer &m,
                            MalignBuffer *compressed) const {
  // Worst-case output size for this input, per zlib.
  uLongf out_len = compressBound(m.size());
  compressed->resize(out_len);
  const int err =
      compress2(reinterpret_cast<Bytef *>(compressed->data()), &out_len,
                reinterpret_cast<const Bytef *>(m.data()), m.size(),
                Z_BEST_SPEED);
  if (err == Z_OK) {
    // Trim to the actual compressed size.
    compressed->resize(out_len);
    return absl::OkStatus();
  }
  return absl::Status(
      absl::StatusCode::kInternal,
      absl::StrFormat("Zlib compression failed: %d srcLen: %d destLen: %d",
                      err, m.size(), out_len));
}
// Decompresses `compressed` into `m`, which the caller must pre-size to
// (at least) the expected plain-text length.
// Returns OkStatus on success; kInternal with the zlib error code otherwise.
absl::Status Zlib::Decompress(const MalignBuffer &compressed,
                              MalignBuffer *m) const {
  uLongf out_len = m->size();
  const int err =
      uncompress(reinterpret_cast<Bytef *>(m->data()), &out_len,
                 reinterpret_cast<const Bytef *>(compressed.data()),
                 compressed.size());
  if (err == Z_OK) {
    // Shrink to the actual decompressed size.
    m->resize(out_len);
    return absl::OkStatus();
  }
  return absl::Status(
      absl::StatusCode::kInternal,
      absl::StrFormat("Zlib decompression failed: %d srcLen: %d destLen: %d",
                      err, compressed.size(), out_len));
}
}; // namespace cpu_check

49
compressor.h Normal file
View File

@@ -0,0 +1,49 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_COMPRESSOR_H_
#define THIRD_PARTY_CPU_CHECK_COMPRESSOR_H_
#include <string>
#include "malign_buffer.h"
#include "absl/status/status.h"
namespace cpu_check {
// Abstract interface for block compressors operating on MalignBuffers.
class Compressor {
 public:
  virtual ~Compressor() {}
  // Short human-readable name of the algorithm.
  virtual std::string Name() const = 0;
  // Compresses 'm' into 'compressed'.
  virtual absl::Status Compress(const MalignBuffer &m,
                                MalignBuffer *compressed) const = 0;
  // Decompresses 'compressed' into 'm'.
  virtual absl::Status Decompress(const MalignBuffer &compressed,
                                  MalignBuffer *m) const = 0;
};
// zlib-backed Compressor implementation.
class Zlib : public Compressor {
 public:
  std::string Name() const override { return "ZLIB"; }
  absl::Status Compress(const MalignBuffer &m,
                        MalignBuffer *compressed) const override;
  absl::Status Decompress(const MalignBuffer &compressed,
                          MalignBuffer *m) const override;
};
}; // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_COMPRESSOR_H_

View File

@@ -1,8 +1,2 @@
#define cpu_check_VERSION "@cpu_check_VERSION@"
// Begin Google local change
#cmakedefine IN_GOOGLE3
#cmakedefine USE_BORINGSSL
#cmakedefine VENDORS_AMD_PATH
#cmakedefine VENDORS_INTEL_PATH
// End Google local change

File diff suppressed because it is too large Load Diff

112
crypto.cc Normal file
View File

@@ -0,0 +1,112 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "crypto.h"
#include "config.h"
#include "absl/status/status.h"
namespace cpu_check {
namespace {
constexpr unsigned char key[33] = "0123456789abcdef0123456789abcdef";
}; // namespace
// Encrypts `plain_text` into `cipher_text` (pre-sized by the caller to the
// plain-text length) with AES-256-GCM, storing the IV and GMAC tag in
// `purse` for later decryption/authentication.
absl::Status Crypto::Encrypt(const MalignBuffer &plain_text,
                             MalignBuffer *cipher_text, CryptoPurse *purse) {
  // The IV is the first bytes of the plain text, zero-padded if shorter
  // than 12 bytes. NOTE(review): deriving the IV from the message is
  // unusual for GCM; presumably acceptable for a hardware self-test tool.
  memset(purse->i_vec, 0, sizeof(purse->i_vec));
  memcpy(purse->i_vec, plain_text.data(),
         std::min(plain_text.size(), sizeof(purse->i_vec)));
  int enc_len = 0;
  int enc_unused_len = 0;
  EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new();
  // Final argument 1 selects encryption.
  EVP_CipherInit_ex(cipher_ctx, EVP_aes_256_gcm(), NULL, key, purse->i_vec, 1);
  if (EVP_CipherUpdate(
          cipher_ctx,
          reinterpret_cast<unsigned char *>(cipher_text->data()),
          &enc_len, reinterpret_cast<const unsigned char *>(plain_text.data()),
          plain_text.size()) != 1) {
    return ReturnError("EVP_CipherUpdate", cipher_ctx);
  }
  // Final emits no ciphertext here (nullptr out buffer) but must be called
  // so the authentication tag is computed.
  if (EVP_CipherFinal_ex(cipher_ctx, nullptr, &enc_unused_len) != 1) {
    return ReturnError("encrypt_EVP_CipherFinal_ex", cipher_ctx);
  }
  enc_len += enc_unused_len;
  if (enc_len != (int)cipher_text->size()) {
    return ReturnError("encrypt_length_mismatch", cipher_ctx);
  }
  if (EVP_CIPHER_CTX_ctrl(cipher_ctx, EVP_CTRL_GCM_GET_TAG,
                          sizeof(purse->gmac_tag), purse->gmac_tag) != 1) {
    return ReturnError("EVP_CTRL_GCM_GET_TAG", cipher_ctx);
  }
  EVP_CIPHER_CTX_free(cipher_ctx);
  return absl::OkStatus();
}
// Decrypts AES-256-GCM `cipher_text` into `plain_text` (pre-sized by the
// caller), authenticating against the IV and GMAC tag carried in `purse`.
absl::Status Crypto::Decrypt(const MalignBuffer &cipher_text,
                             const CryptoPurse &purse,
                             MalignBuffer *plain_text) {
  int dec_len = 0;
  int dec_extra_len = 0;
  EVP_CIPHER_CTX *cipher_ctx = EVP_CIPHER_CTX_new();
  // Final argument 0 selects decryption.
  EVP_CipherInit_ex(cipher_ctx, EVP_aes_256_gcm(), NULL, key, purse.i_vec, 0);
  // Make a non-const copy of gmac_tag because that's what EVP_CIPHER_CTX_ctrl
  // requires, even though it won't be modified in this use.
  unsigned char copied_tag[sizeof(purse.gmac_tag)];
  memcpy(copied_tag, purse.gmac_tag, sizeof(purse.gmac_tag));
  if (EVP_CIPHER_CTX_ctrl(cipher_ctx, EVP_CTRL_GCM_SET_TAG, sizeof(copied_tag),
                          reinterpret_cast<void *>(copied_tag)) != 1) {
    return ReturnError("EVP_CTRL_GCM_SET_TAG", cipher_ctx);
  }
  if (EVP_CipherUpdate(
          cipher_ctx, reinterpret_cast<unsigned char *>(plain_text->data()),
          &dec_len, reinterpret_cast<const unsigned char *>(cipher_text.data()),
          cipher_text.size()) != 1) {
    return ReturnError("Decryption", cipher_ctx);
  }
  // Final verifies the GMAC tag; a failure here means the data or tag was
  // corrupted.
  if (EVP_CipherFinal_ex(
          cipher_ctx,
          reinterpret_cast<unsigned char *>(plain_text->data() + dec_len),
          &dec_extra_len) != 1) {
    return ReturnError("decrypt_EVP_CipherFinal_ex", cipher_ctx);
  }
  dec_len += dec_extra_len;
  if (dec_len != (int)plain_text->size()) {
    return ReturnError("decrypt_length_mismatch", cipher_ctx);
  }
  EVP_CIPHER_CTX_free(cipher_ctx);
  return absl::OkStatus();
}
// Runs BoringSSL's built-in self test when compiled against BoringSSL;
// otherwise a no-op that always returns OkStatus.
absl::Status Crypto::SelfTest() {
#ifdef USE_BORINGSSL
  if (BORINGSSL_self_test() == 0) {
    return absl::Status(absl::StatusCode::kInternal, "BORINGSSL_self_test");
  }
#endif
  return absl::OkStatus();
}
// Builds a kInternal status carrying `message`, releasing `cipher_ctx`
// before propagating the failure so error paths do not leak the context.
absl::Status Crypto::ReturnError(absl::string_view message,
                                 EVP_CIPHER_CTX *cipher_ctx) {
  EVP_CIPHER_CTX_free(cipher_ctx);
  const absl::Status failure(absl::StatusCode::kInternal, message);
  return failure;
}
}; // namespace cpu_check

54
crypto.h Normal file
View File

@@ -0,0 +1,54 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_CRYPTO_H_
#define THIRD_PARTY_CPU_CHECK_CRYPTO_H_
#include "malign_buffer.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include <openssl/crypto.h>
#include <openssl/evp.h>
namespace cpu_check {
// Stateless AES-256-GCM encrypt/decrypt helpers used to exercise and
// verify crypto code paths.
class Crypto {
 public:
  // Encryption produces these values, which are consumed by decryption.
  struct CryptoPurse {
    unsigned char i_vec[12];     // GCM initialization vector.
    unsigned char gmac_tag[16];  // GCM authentication tag.
  };
  // Encrypts 'plain_text' to 'cipher_text' and stores i_vec and gmac
  // in 'purse'.
  static absl::Status Encrypt(const MalignBuffer &plain_text,
                              MalignBuffer *cipher_text, CryptoPurse *purse);
  // Decrypts 'cipher_text' into 'plain_text' using i_vec and gmac from 'purse'.
  static absl::Status Decrypt(const MalignBuffer &cipher_text,
                              const CryptoPurse &purse,
                              MalignBuffer *plain_text);
  // Runs crypto self test, if available.
  static absl::Status SelfTest();
 private:
  // Returns kInternal error and frees context 'cipher_ctx'.
  static absl::Status ReturnError(absl::string_view message,
                                  EVP_CIPHER_CTX *cipher_ctx);
};
}; // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_CRYPTO_H_

95
hasher.cc Normal file
View File

@@ -0,0 +1,95 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "hasher.h"
#include "crc32c.h"
#include "utils.h"
#include "third_party/farmhash/src/farmhash.h"
#include <openssl/crypto.h>
#include <openssl/evp.h>
#include <zlib.h>
namespace cpu_check {
namespace {
// Computes the digest of `s` using the given OpenSSL EVP algorithm and
// returns it hex-encoded.
std::string OpenSSL_Hash(const MalignBuffer &s, const EVP_MD *type) {
  EVP_MD_CTX *ctx;
  ctx = EVP_MD_CTX_create();
  EVP_DigestInit_ex(ctx, type, nullptr);
  std::string hash;
  hash.resize(EVP_MD_CTX_size(ctx));
  // Pre-fill the digest buffer so MSAN does not flag it; no-op otherwise.
  MalignBuffer::InitializeMemoryForSanitizer(hash.data(), EVP_MD_CTX_size(ctx));
  EVP_DigestUpdate(ctx, s.data(), s.size());
  EVP_DigestFinal_ex(ctx, (uint8_t *)&hash[0], nullptr);
  EVP_MD_CTX_destroy(ctx);
  return HexStr(hash);
}
} // namespace
// OpenSSL-backed digests.
std::string Md5::Hash(const MalignBuffer &b) const {
  return OpenSSL_Hash(b, EVP_md5());
}
std::string Sha1::Hash(const MalignBuffer &b) const {
  return OpenSSL_Hash(b, EVP_sha1());
}
std::string Sha256::Hash(const MalignBuffer &b) const {
  return OpenSSL_Hash(b, EVP_sha256());
}
std::string Sha512::Hash(const MalignBuffer &b) const {
  return OpenSSL_Hash(b, EVP_sha512());
}
// zlib Adler-32 checksum; result is a hex dump of the raw uLong bytes
// (byte order is therefore host-dependent).
std::string Adler32::Hash(const MalignBuffer &b) const {
  uLong c = adler32(0, Z_NULL, 0);
  c = adler32(c, reinterpret_cast<const Bytef *>(b.data()), b.size());
  return HexData(reinterpret_cast<const char *>(&c), sizeof(c));
}
// zlib CRC-32; hex dump of the raw uLong bytes.
std::string Crc32::Hash(const MalignBuffer &b) const {
  uLong c = crc32(0, Z_NULL, 0);
  c = crc32(c, reinterpret_cast<const Bytef *>(b.data()), b.size());
  return HexData(reinterpret_cast<const char *>(&c), sizeof(c));
}
// Local crc32c implementation (crc32c.h); hex dump of the raw bytes.
std::string Crc32C::Hash(const MalignBuffer &b) const {
  const uint32_t c = crc32c(b.data(), b.size());
  return HexData(reinterpret_cast<const char *>(&c), sizeof(c));
}
// FarmHash 64-bit hash; hex dump of the raw bytes.
std::string FarmHash64::Hash(const MalignBuffer &b) const {
  const uint64_t c = util::Hash64(b.data(), b.size());
  return HexData(reinterpret_cast<const char *>(&c), sizeof(c));
}
// Registers one instance of every supported hash implementation.
Hashers::Hashers() {
  hashers_.push_back(std::make_unique<Md5>());
  hashers_.push_back(std::make_unique<Sha1>());
  hashers_.push_back(std::make_unique<Sha256>());
  hashers_.push_back(std::make_unique<Sha512>());
  hashers_.push_back(std::make_unique<Adler32>());
  hashers_.push_back(std::make_unique<Crc32>());
  hashers_.push_back(std::make_unique<Crc32C>());
  hashers_.push_back(std::make_unique<FarmHash64>());
}
// Returns a hasher chosen uniformly at random; the choice is deterministic
// for a given seed.
const Hasher &Hashers::RandomHasher(uint64_t seed) const {
  std::knuth_b gen(seed);
  std::uniform_int_distribution<size_t> pick(0, hashers_.size() - 1);
  return *hashers_[pick(gen)];
}
} // namespace cpu_check

96
hasher.h Normal file
View File

@@ -0,0 +1,96 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_HASH_H_
#define THIRD_PARTY_CPU_CHECK_HASH_H_
#include <memory>
#include <string>
#include <vector>
#include "malign_buffer.h"
namespace cpu_check {
// Interface for hash/checksum implementations over MalignBuffers.
class Hasher {
 public:
  virtual ~Hasher() {}
  // Short human-readable algorithm name.
  virtual std::string Name() const = 0;
  // Returns the hex-encoded hash of `b`.
  virtual std::string Hash(const MalignBuffer &b) const = 0;
};
class Md5 : public Hasher {
 public:
  std::string Name() const override { return "MD5"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Sha1 : public Hasher {
 public:
  std::string Name() const override { return "SHA1"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Sha256 : public Hasher {
 public:
  std::string Name() const override { return "SHA256"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Sha512 : public Hasher {
 public:
  std::string Name() const override { return "SHA512"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Adler32 : public Hasher {
 public:
  std::string Name() const override { return "ADLER32"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Crc32 : public Hasher {
 public:
  std::string Name() const override { return "CRC32"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class Crc32C : public Hasher {
 public:
  std::string Name() const override { return "CRC32C"; }
  std::string Hash(const MalignBuffer &b) const override;
};
class FarmHash64 : public Hasher {
 public:
  std::string Name() const override { return "FarmHash64"; }
  std::string Hash(const MalignBuffer &b) const override;
};
// Owns one instance of every hasher and can pick one pseudo-randomly.
class Hashers {
 public:
  Hashers();
  // Returns a randomly selected hasher.
  const Hasher &RandomHasher(uint64_t seed) const;
  const std::vector<std::unique_ptr<Hasher>> &hashers() const {
    return hashers_;
  }
 private:
  std::vector<std::unique_ptr<Hasher>> hashers_;
};
} // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_HASH_H_

382
malign_buffer.cc Normal file
View File

@@ -0,0 +1,382 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "malign_buffer.h"
#include <cstddef>
#if defined(__i386__) || defined(__x86_64__)
#include <immintrin.h>
#endif
#include <sys/mman.h>
#include <unistd.h>
#include <cstdlib>
#include <iomanip>
#include <sstream>
#include "log.h"
#include "utils.h"
#undef HAS_FEATURE_MEMORY_SANITIZER
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define HAS_FEATURE_MEMORY_SANITIZER
#endif
#endif
#if defined(__i386__) || defined(__x86_64__)
#define X86_TARGET_ATTRIBUTE(s) __attribute__((target(s)))
#else
#define X86_TARGET_ATTRIBUTE(s)
#endif
namespace cpu_check {
namespace {
// Copies `size` bytes from `src` to `dst` with the x86 `rep movsb` string
// instruction. Fatal on non-x86 builds.
inline void __movsb(char *dst, const char *src, size_t size) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__("rep movsb"
                       : "+D"(dst), "+S"(src), "+c"(size)
                       :
                       : "memory");
#else
  LOG(FATAL) << "Cannot rep;movsb";
#endif
}
// Fills `size` bytes at `dst` with byte `c` using the x86 `rep stosb`
// string instruction. Fatal on non-x86 builds.
inline void __stosb(void *dst, unsigned char c, size_t size) {
#if defined(__i386__) || defined(__x86_64__)
  __asm__ __volatile__("rep stosb" : "+D"(dst), "+c"(size) : "a"(c) : "memory");
#else
  LOG(FATAL) << "Cannot rep;stosb";
#endif
}
// Copies `size` bytes from `src` to `dst` using unaligned 128-bit SSE
// loads/stores for the bulk, with a plain memcpy for the sub-16-byte tail.
// Fatal on non-x86 builds.
inline void __sse_128_memcpy(char *dst, const char *src, size_t size) {
#if (defined(__i386__) || defined(__x86_64__))
  const size_t blks = size / 16;
  // size_t index: fixes the signed/unsigned comparison against `blks` and
  // the overflow for buffers larger than INT_MAX blocks.
  for (size_t i = 0; i < blks; i++) {
    _mm_storeu_si128(
        reinterpret_cast<__m128i *>(dst) + i,
        _mm_loadu_si128(reinterpret_cast<const __m128i *>(src) + i));
  }
  memcpy(dst + blks * 16, src + blks * 16, size - blks * 16);
#else
  LOG(FATAL) << "SSE not available";
#endif
}
// Copies `size` bytes using unaligned 256-bit AVX loads/stores for the
// bulk, with a plain memcpy for the sub-32-byte tail. Fatal on non-x86.
X86_TARGET_ATTRIBUTE("avx")
inline void __avx_256_memcpy(char *dst, const char *src, size_t size) {
#if (defined(__i386__) || defined(__x86_64__))
  const size_t blks = size / 32;
  // size_t index: fixes the signed/unsigned comparison against `blks` and
  // the overflow for buffers larger than INT_MAX blocks.
  for (size_t i = 0; i < blks; i++) {
    _mm256_storeu_si256(
        reinterpret_cast<__m256i *>(dst) + i,
        _mm256_loadu_si256(reinterpret_cast<const __m256i *>(src) + i));
  }
  memcpy(dst + blks * 32, src + blks * 32, size - blks * 32);
#else
  LOG(FATAL) << "x86 only";
#endif
}
// Copies `size` bytes using unaligned 512-bit AVX512 loads/stores for the
// bulk, with a plain memcpy for the sub-64-byte tail. Fatal on non-x86.
X86_TARGET_ATTRIBUTE("avx512f")
inline void __avx_512_memcpy(char *dst, const char *src, size_t size) {
#if (defined(__i386__) || defined(__x86_64__))
  const size_t blks = size / 64;
  // size_t index: fixes the signed/unsigned comparison against `blks` and
  // the overflow for buffers larger than INT_MAX blocks.
  for (size_t i = 0; i < blks; i++) {
    _mm512_storeu_si512(
        reinterpret_cast<__m512i *>(dst) + i,
        _mm512_loadu_si512(reinterpret_cast<const __m512i *>(src) + i));
  }
  memcpy(dst + blks * 64, src + blks * 64, size - blks * 64);
#else
  LOG(FATAL) << "x86 only";
#endif
}
} // namespace
// Rounds `k` up to the next multiple of the VM page size.
size_t MalignBuffer::RoundUpToPageSize(size_t k) {
  const size_t pages = (k + kPageSize - 1) / kPageSize;
  return pages * kPageSize;
}
// Helper to make MSAN happy. NOP if memory sanitizer is not enabled.
// Fills [addr, addr+size) with deterministic pseudo-random bytes so that
// reads of deliberately-uninitialized buffers are not flagged.
void MalignBuffer::InitializeMemoryForSanitizer(char *addr, size_t size) {
#ifdef HAS_FEATURE_MEMORY_SANITIZER
  // Default-seeded engine: the fill pattern is the same on every call.
  std::default_random_engine rnd;
  std::uniform_int_distribution<int> dist(std::numeric_limits<char>::min(),
                                          std::numeric_limits<char>::max());
  for (size_t i = 0; i < size; i++) {
    addr[i] = dist(rnd);
  }
#endif
}
const size_t MalignBuffer::kPageSize = sysconf(_SC_PAGESIZE);
const size_t MalignBuffer::kCacheLineSize = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
// Human-readable name of a copy method, for logging/syndrome output.
std::string MalignBuffer::ToString(CopyMethod m) {
  switch (m) {
    case kMemcpy:
      return "memcpy";
    case kRepMov:
      return "rep;mov";
    case kSseBy128:
      return "sse:128";
    case kAvxBy256:
      return "avx:256";
    case kAvxBy512:
      return "avx:512";
  }
  // Defensive fallback: without it, an out-of-range enum value would run
  // off the end of a non-void function (undefined behavior; compilers warn
  // with -Wreturn-type).
  return "unknown";
}
// Returns a pseudo-random offset in [0, kPageSize); deterministic for a
// given seed.
size_t MalignBuffer::RandomAlignment(uint64_t seed) {
  std::knuth_b gen(seed);
  std::uniform_int_distribution<size_t> dist(0, kPageSize - 1);
  return dist(gen);
}
// Allocates a page-aligned backing store large enough to hold `capacity`
// bytes at any alignment offset within a page (hence the extra page).
// Fatal if the allocation fails.
MalignBuffer::MalignBuffer(size_t capacity)
    : capacity_(capacity),
      base_address_(
          aligned_alloc(kPageSize, RoundUpToPageSize(capacity) + kPageSize)) {
  if (base_address_ == nullptr) {
    LOG(FATAL) << "Failed allocate for capacity: " << capacity;
  }
  // There are lots of places that use uninitialized MalignBuffer. So just
  // fill some pseudo-random bytes if cpu_check is compiled with msan.
  InitializeMemoryForSanitizer(static_cast<char *>(base_address_), capacity_);
}
// Constructs a buffer holding a copy of `s` placed at the given alignment
// offset; always copies with plain memcpy.
MalignBuffer::MalignBuffer(size_t alignment_offset, absl::string_view s)
    : MalignBuffer(s.size() + alignment_offset) {
  Initialize(alignment_offset, s.size());
  CopyFrom(s, kMemcpy);
}
// Backing store comes from aligned_alloc, so plain free() releases it.
MalignBuffer::~MalignBuffer() { free(base_address_); }
// (Re)positions the logical buffer within the backing store: the buffer
// starts `alignment_offset` bytes past the page-aligned base and is
// `length` bytes long. Fatal if either argument exceeds its bound.
void MalignBuffer::Initialize(size_t alignment_offset, size_t length) {
  if (length > capacity_) {
    LOG(FATAL) << "Length: " << length << " Capacity: " << capacity_;
  }
  if (alignment_offset >= kPageSize) {
    LOG(FATAL) << "Alignment: " << alignment_offset
               << " PageSize: " << kPageSize;
  }
  alignment_offset_ = alignment_offset;
  length_ = length;
  buffer_address_ = static_cast<char *>(base_address_) + alignment_offset_;
}
// Changes the logical length, keeping the current alignment offset.
// Does not reallocate; `length` must not exceed the construction capacity.
void MalignBuffer::resize(size_t length) {
  Initialize(alignment_offset_, length);
}
std::string MalignBuffer::CopyFrom(const MalignBuffer &that, CopyMethod m) {
CopyFrom(absl::string_view(that.data(), that.size()), m);
return Syndrome(that);
}
void MalignBuffer::CopyFrom(absl::string_view src, CopyMethod m) {
if (size() != src.size()) {
LOG(FATAL) << "this.size: " << size() << " src.size:" << src.size();
}
CopyFrom(0, src, m);
}
void MalignBuffer::CopyFrom(size_t pos, absl::string_view src, CopyMethod m) {
if (pos + src.size() > size()) {
LOG(FATAL) << "this.size: " << size() << " src.size:" << src.size()
<< " pos: " << pos;
}
switch (m) {
case kMemcpy:
// Assumes memcpy doesn't use rep;movsb; false in lots of environments.
memcpy(data() + pos, src.data(), src.size());
break;
case kRepMov:
__movsb(data() + pos, src.data(), src.size());
break;
case kSseBy128:
__sse_128_memcpy(data() + pos, src.data(), src.size());
break;
case kAvxBy256:
__avx_256_memcpy(data() + pos, src.data(), src.size());
break;
case kAvxBy512:
__avx_512_memcpy(data() + pos, src.data(), src.size());
break;
}
}
// Compares 'this' against 'that'. Returns "" when identical; otherwise the
// corruption syndrome annotated with both buffer addresses, which helps
// correlate a failure with buffer placement.
std::string MalignBuffer::Syndrome(const MalignBuffer &that) const {
  std::stringstream s;
  std::string syndrome = CorruptionSyndrome(that);
  if (syndrome.empty()) return "";
  s << syndrome << ", \"this\": \"" << static_cast<const void *>(data())
    << "\", "
    << "\"that\": \"" << static_cast<const void *>(that.data()) << "\"";
  return s.str();
}
// Builds a detailed JSON-style corruption report comparing 'this' to 'that':
// Hamming statistics, corrupted subrange bounds, per-bit-lane counts, and a
// dump of up to 64 differing locations. Returns "" when both the byte-wise
// scan and memcmp agree the buffers are identical.
std::string MalignBuffer::CorruptionSyndrome(const MalignBuffer &that) const {
  std::stringstream s;
  if (size() != that.size()) {
    s << Json("unequalSizeThis", size()) << ", "
      << Json("unequalSizeThat", that.size());
    return s.str();
  }
  // memcmp is reported alongside the byte-wise scan so that a disagreement
  // between the two mechanisms (itself a defect symptom) becomes visible.
  bool failed_memcmp = memcmp(data(), that.data(), that.size());
  int wrong_bytes = 0;  // count of differing bytes
  int wrong_bits = 0;   // total Hamming distance
  int byte_faults = 0;  // OR of all per-byte XOR diffs
  int first_wrong = INT_MAX;
  int last_wrong = INT_MIN;
  std::vector<int> lane_errors(8, 0);  // error count per bit lane
  for (size_t i = 0; i < size(); i++) {
    unsigned char a = *(data() + i);
    unsigned char b = *(that.data() + i);
    unsigned char d = a ^ b;
    if (d) {
      first_wrong = std::min<int>(first_wrong, i);
      last_wrong = std::max<int>(last_wrong, i);
      byte_faults |= d;
      wrong_bytes++;
      wrong_bits += __builtin_popcount(d);
      // NOTE(review): inner 'i' shadows the outer loop index; harmless here
      // but easy to misread.
      for (size_t i = 0; i < 8; i++) {
        if ((d >> i) & 1) {
          lane_errors[i]++;
        }
      }
    }
  }
  if (wrong_bits || wrong_bytes) {
    const int range_width = (last_wrong - first_wrong) + 1;
    // "**Passed_Memcmp**" marks the anomalous case where the scan found
    // diffs but memcmp did not.
    s << Json("cmpResult",
              (failed_memcmp ? "Failed_Memcmp" : "**Passed_Memcmp**"))
      << ", " << Json("wrongByteCount", wrong_bytes) << ", "
      << Json("wrongBitCount", wrong_bits) << ", "
      << Json("corruptionWidth", range_width) << ", "
      << Json("corruptStart", first_wrong) << ", "
      << Json("corruptByteBitMask", byte_faults) << ", "
      << "\"byBitLane\": [";
    for (size_t i = 0; i < 8; i++) {
      if (i) s << ", ";
      s << lane_errors[i];
    }
    s << " ] ";
    // Dump up to 64 corrupted locations.
    std::stringstream dump;
    dump << " \"byteErrors\": [ " << std::hex;
    uint64_t buf_a = 0;
    uint64_t buf_b = 0;
    for (size_t k = 0; k < std::min(64, range_width); k++) {
      uint8_t a = *(data() + first_wrong + k);
      uint8_t b = *(that.data() + first_wrong + k);
      if (k) dump << ", ";
      dump << "[ " << std::setw(2) << "\"0x" << static_cast<int>(a) << "\", "
           << std::setw(2) << "\"0x" << static_cast<int>(b) << "\" ";
      // Accumulate bytes into 8-byte words so that, when a word boundary is
      // reached, the systematic fill pattern can be decoded via CrackId.
      buf_a = (buf_a >> 8) | static_cast<uint64_t>(a) << 56;
      buf_b = (buf_b >> 8) | static_cast<uint64_t>(b) << 56;
      if ((k >= 7) && (7 == ((first_wrong + k) % 8))) {
        dump << ", " << CrackId(buf_a) << ", " << CrackId(buf_b);
        buf_a = 0;
        buf_b = 0;
      }
      dump << " ]";
    }
    dump << " ] ";
    return s.str() + ", " + dump.str();
  } else {
    if (!failed_memcmp) return "";
    // Scan found no differing bytes yet memcmp disagreed: flag loudly.
    return Json("cmpResult", "**Failed_Memcmp**");
  }
}
// Splits a 64-bit word into three hex fields: top 16 bits, middle 24 bits,
// low 24 bits -- matching the pid/round/offset layout written by
// FillBufferSystematic::Generate.
std::string MalignBuffer::CrackId(uint64_t v) const {
  std::stringstream s;
  s << std::hex << " [\"0x" << std::setw(4) << (v >> 48) << "\", \"0x"
    << std::setw(6) << ((v >> 24) & 0xffffff) << "\", \"0x" << std::setw(6)
    << (v & 0xffffff) << "\"]";
  return s.str();
}
// Flushes a randomly chosen subset of the buffer's cache lines (each line
// with probability 1/2). x86 only; no-op elsewhere.
void MalignBuffer::RandomFlush(std::knuth_b *rng) const {
#if defined(__i386__) || defined(__x86_64__)
  // Note: no barriers used.
  // NOTE(review): buffer_address_ already includes alignment_offset_ (see
  // Initialize), so adding it again here skips the first alignment_offset_
  // bytes of the usable window -- looks unintended; confirm.
  const char *p = buffer_address_ + alignment_offset_;
  while (p < buffer_address_ + length_) {
    if (std::uniform_int_distribution<int>(0, 1)(*rng)) {
      __builtin_ia32_clflush(p);
    }
    p += kCacheLineSize;
  }
#endif
}
// Renders the hole as a JSON record, or JSON null for an empty hole.
std::string MalignBuffer::PunchedHole::ToString() const {
  if (length) {
    return JsonRecord("hole", Json("start", start) + ", " +
                                  Json("length", length) + ", " +
                                  Json("v", static_cast<int>(v)));
  } else {
    return JsonNull("hole");
  }
}

// Sets 'length' bytes starting at 'offset' to 'v', using either x86
// rep;stosb or conventional memset according to 'use_rep_stos'.
void MalignBuffer::Memset(size_t offset, unsigned char v, size_t length,
                          bool use_rep_stos) {
  if (use_rep_stos) {
    __stosb(data() + offset, v, length);
  } else {
    memset(data() + offset, v, length);
  }
}

// Overwrites the hole's range with its fill byte; no-op for an empty hole.
void MalignBuffer::PunchHole(const PunchedHole &hole, bool use_rep_stos) {
  if (hole.length) {
    Memset(hole.start, hole.v, hole.length, use_rep_stos);
  }
}
// Hints to the OS to release the buffer's memory.
// madvise() operates on whole pages, so the range is shrunk to the
// page-aligned interior of the buffer; buffers smaller than a page are left
// untouched.
void MalignBuffer::MadviseDontNeed() const {
  // Round up the buffer start address to a page boundary.
  const uintptr_t start =
      (reinterpret_cast<uintptr_t>(data()) + kPageSize - 1) & ~(kPageSize - 1);
  // Round down the buffer end address to a page boundary.
  const uintptr_t end =
      reinterpret_cast<uintptr_t>(data() + size() - 1) & ~(kPageSize - 1);
  if (end > start) {
    const size_t length = end - start;
    if (madvise(reinterpret_cast<char *>(start), length, MADV_DONTNEED) == -1) {
      // The original message logged a dangling "tid " with no id; dropped.
      LOG(WARN) << "madvise(MADV_DONTNEED) failed: " << strerror(errno)
                << " length: " << length;
    }
  }
}
// Returns a pseudo-random hole (start, length) within the usable buffer,
// deterministic in 'seed'. Hole length is capped at 8192 bytes.
MalignBuffer::PunchedHole MalignBuffer::RandomPunchedHole(uint64_t seed) const {
  std::knuth_b rng(seed);
  MalignBuffer::PunchedHole hole;
  // NOTE(review): assumes length_ >= 1; a zero-length buffer makes the
  // distribution bounds invalid (UB) -- confirm callers guarantee this.
  hole.length = std::uniform_int_distribution<size_t>(
      1, std::min<size_t>(length_, 8192))(rng);
  hole.start =
      std::uniform_int_distribution<size_t>(0, length_ - hole.length)(rng);
  return hole;
}
} // namespace cpu_check

124
malign_buffer.h Normal file
View File

@@ -0,0 +1,124 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_MALIGN_BUFFER_H_
#define THIRD_PARTY_CPU_CHECK_MALIGN_BUFFER_H_
#include <random>
#include <string>
#include "absl/strings/string_view.h"
namespace cpu_check {
// Data buffer supporting various alignments, copy mechanisms, and verification
// methods.
class MalignBuffer {
 public:
  // A byte range [start, start+length) to be overwritten with fill byte 'v'.
  struct PunchedHole {
    std::string ToString() const;
    size_t start = 0;
    size_t length = 0;
    unsigned char v = 0x53;  // Default fill byte.
  };
  // Mechanisms used to copy data into the buffer (see CopyFrom in the .cc).
  enum CopyMethod {
    kMemcpy,    // libc memcpy
    kRepMov,    // x86 rep;movsb
    kSseBy128,  // 128-bit SSE copy loop
    kAvxBy256,  // 256-bit AVX copy loop
    kAvxBy512,  // 512-bit AVX-512 copy loop
  };
  // Returns name of given CopyMethod.
  static std::string ToString(CopyMethod m);
  static const size_t kPageSize;       // System page size.
  static const size_t kCacheLineSize;  // L1 D-cache line size.
  // Returns a random alignment offset in range [0..kPageSize-1].
  static size_t RandomAlignment(uint64_t seed);
  // Helper to make MSAN happy. NOP if memory sanitizer is not enabled.
  static void InitializeMemoryForSanitizer(char* addr, size_t size);
  // Constructs MalignBuffer of specified capacity.
  MalignBuffer(size_t capacity);
  // Constructs and initializes MalignBuffer with specified alignment and
  // content. Useful for unit tests.
  // REQUIRES:
  //   alignment_offset < kPageSize
  MalignBuffer(size_t alignment_offset, absl::string_view s);
  ~MalignBuffer();
  // Initializes buffer to specified alignment.
  // REQUIRES:
  //   alignment_offset < kPageSize
  //   length <= this.capacity_.
  void Initialize(size_t alignment_offset, size_t length);
  const char* data() const { return buffer_address_; }
  char* data() { return buffer_address_; }
  size_t size() const { return length_; }
  // REQUIRES length <= capacity_.
  void resize(size_t length);
  // Compares 'this' to 'that' returning empty string if identical.
  // If not identical, returns a syndrome, currently Hamming distance,
  // corrupted subrange bounds, and the diffs.
  std::string Syndrome(const MalignBuffer& that) const;
  // Validated data copy from source to 'this'.
  // 'this' must be appropriately sized.
  // Returns syndrome upon copy failure.
  std::string CopyFrom(const MalignBuffer& that, CopyMethod m);
  // Unvalidated copy to 'this'.
  void CopyFrom(absl::string_view src, CopyMethod m);
  void CopyFrom(size_t pos, absl::string_view src, CopyMethod m);
  // Randomly flushes cache lines.
  void RandomFlush(std::knuth_b* rng) const;
  // Conventional or rep;sto memset operation, according to 'use_rep_stos'.
  void Memset(size_t offset, unsigned char v, size_t length, bool use_rep_stos);
  // Memsets buffer to 'hole.v', using rep;stos operation if
  // 'use_rep_stos' set;
  void PunchHole(const PunchedHole& hole, bool use_rep_stos);
  // Hints to the OS to release the buffer's memory.
  void MadviseDontNeed() const;
  // Returns random PunchedHole within 'this'.
  MalignBuffer::PunchedHole RandomPunchedHole(uint64_t seed) const;
 private:
  // Rounds 'k' up to a whole number of pages.
  static size_t RoundUpToPageSize(size_t k);
  std::string CorruptionSyndrome(const MalignBuffer& that) const;
  std::string CrackId(uint64_t) const;
  const size_t capacity_;           // Requested capacity in bytes.
  void* base_address_ = nullptr;    // Page-aligned allocation base.
  size_t alignment_offset_ = 0;     // Offset of window within first page.
  size_t length_ = 0;               // Usable length
  char* buffer_address_ = nullptr;  // base_address_ + alignment_offset_.
};
} // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_MALIGN_BUFFER_H_

224
pattern_generator.cc Normal file
View File

@@ -0,0 +1,224 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "pattern_generator.h"
#include <unistd.h>
#include <cmath>
#include <cstdint>
#include <fstream>
#include "log.h"
namespace cpu_check {
namespace {
// So-called Logistic Map with parameter 4.0.
// Floating point approximation aside, if v is in the closed unit interval than
// ChaoticF1(v) is in the closed unit interval.
// So-called Logistic Map with parameter 4.0.
// Floating point rounding aside, maps the closed unit interval into itself.
template <typename T>
T ChaoticF1(T x) {
  return 4.0 * x * (1.0 - x);
}

// Reciprocal-like function valid over closed unit interval.
template <typename T>
T Recip(T x) {
  return 1.0 / (x + 0.1);
}

// Inverse of Recip for x in closed unit interval.
template <typename T>
T Unrecip(T x) {
  return (1.0 / x) - 0.1;
}

// One logistic-map iterate conjugated by Recip:
// Recip(ChaoticF1(Unrecip(x))).
template <typename T>
T ReciprocatedChaos(T x) {
  return Recip(ChaoticF1(Unrecip(x)));
}
// Reads the word list from the first dictionary path that opens.
// Returns the words sorted by increasing length; an empty vector if no
// dictionary was found.
std::vector<std::string> ReadDict() {
  // Dictionary search paths
  static const char* dicts[] = {
      "/usr/share/dict/words",
      "words",
  };
  std::vector<std::string> words;
  std::ifstream f;
  for (const auto& d : dicts) {
    f.open(d, std::ifstream::in);
    if (f.is_open()) break;
    f.clear();
  }
  if (!f.is_open()) return words;
  LOG(DEBUG) << "Reading words.";
  std::string word;
  // Loop on getline itself: the previous `while (!f.eof())` form appended a
  // spurious empty "word" after reading the final newline.
  while (std::getline(f, word)) {
    words.push_back(word);
  }
  f.close();
  LOG(DEBUG) << "Read " << words.size() << " words.";
  std::sort(words.begin(), words.end(),
            [](const std::string& a, const std::string& b) {
              return a.size() < b.size();
            });
  return words;
}
} // namespace
// Fills 'b' with self-identifying 8-byte words; 'copy_method' and
// 'use_repstos' are unused by this generator. Optionally iterates a chaotic
// double-precision function once per word; the result lets callers
// cross-check FP consistency.
FloatingPointResults FillBufferSystematic::Generate(
    uint64_t round, MalignBuffer::CopyMethod copy_method, bool use_repstos,
    bool exercise_floating_point, MalignBuffer* b) const {
  const uint64_t pid = getpid();
  // Format: 2 bytes of PID, 3 bytes of round number, 3 bytes of offset.
  // Note: Perhaps should be AC-modulated. Perhaps should be absolute aligned
  // for easier recognition.
  // Note: appropriate for LE machines only.
  FloatingPointResults fp;
  fp.d = std::max<uint64_t>(round, 2);
  for (size_t i = 0; i * 8 < b->size(); i++) {
    const size_t p = 8 * i;
    // Final word may be partial.
    const size_t k = std::min<size_t>(8, b->size() - p);
    const uint64_t v =
        ((pid & 0xffff) << 48) | ((round & 0xffffff) << 24) | (i & 0xffffff);
    // Byte-wise store of the word's little-endian representation.
    for (size_t m = 0; m < k; m++) {
      (b->data())[p + m] = *(reinterpret_cast<const char*>(&v) + m);
    }
    if (exercise_floating_point) {
      fp.d = ReciprocatedChaos<double>(fp.d);
    }
  }
  return fp;
}
// Fills 'b' with pseudo-random bytes, appended in random-sized (1..8 byte)
// chunks via the requested copy method. Deterministic in 'round';
// 'use_repstos' is unused by this generator.
FloatingPointResults FillBufferRandom::Generate(
    uint64_t round, MalignBuffer::CopyMethod copy_method, bool use_repstos,
    bool exercise_floating_point, MalignBuffer* b) const {
  std::knuth_b rng(round);
  std::uniform_int_distribution<uint64_t> dist(
      0, std::numeric_limits<uint64_t>::max());
  FloatingPointResults fp;
  // Seed the float iterate with max(round, 2), converted to float.
  fp.f = std::max<uint64_t>(round, 2);
  size_t p = 0;
  const size_t length = b->size();
  // Repeatedly append random number (one to eight) random bytes.
  while (p < length) {
    const size_t max_span = std::min<size_t>(length - p, 8);
    const size_t z = std::uniform_int_distribution<size_t>(1, max_span)(rng);
    const uint64_t v = dist(rng);
    b->CopyFrom(p, absl::string_view(reinterpret_cast<const char*>(&v), z),
                copy_method);
    p += z;
    if (exercise_floating_point) {
      fp.f = ReciprocatedChaos<float>(fp.f);
    }
  }
  return fp;
}
// Fills 'b' with dictionary words separated by single spaces, padding the
// tail with spaces. Word choice follows an exponential distribution over the
// length-sorted word list, biasing toward short words. Deterministic in
// 'round'.
FloatingPointResults FillBufferText::Generate(
    uint64_t round, MalignBuffer::CopyMethod copy_method, bool use_repstos,
    bool exercise_floating_point, MalignBuffer* b) const {
  std::knuth_b rng(round);
  std::exponential_distribution<double> dist(20);
  FloatingPointResults fp;
  fp.ld = std::max<uint64_t>(round, 2);
  const size_t bufsize = b->size();
  size_t pos = 0;
  while (pos < bufsize) {
    // Clamp the exponential draw to a valid word index.
    const size_t r = std::min(static_cast<size_t>(dist(rng) * words_.size()),
                              words_.size() - 1);
    const auto& word = words_[r];
    const size_t wordlen = word.size();
    // Stop once the next word no longer fits (padding fills the rest).
    if (pos + wordlen >= bufsize) {
      break;
    }
    b->CopyFrom(pos, word, copy_method);
    pos += wordlen;
    if (pos < bufsize) {
      b->Memset(pos, ' ', 1, use_repstos);
      pos++;
    }
    if (exercise_floating_point) {
      fp.ld = ReciprocatedChaos<long double>(fp.ld);
    }
  }
  // Pad with spaces
  b->Memset(pos, ' ', bufsize - pos, use_repstos);
  return fp;
}
// Zeroes the buffer, then memsets randomly aligned, random-width, overlapping
// stripes with an incrementing fill byte ("flavor"). Deterministic in
// 'round'; 'copy_method' is unused by this generator.
FloatingPointResults FillBufferGrilledCheese::Generate(
    uint64_t round, MalignBuffer::CopyMethod copy_method, bool use_repstos,
    bool exercise_floating_point, MalignBuffer* b) const {
  std::knuth_b rng(round);
  FloatingPointResults fp;
  fp.f = std::max<uint64_t>(round, 2);
  fp.d = std::max<uint64_t>(round, 2);
  const size_t kAdvance = 15;  // Step between stripe windows.
  const size_t kWindow = 64;   // Max distance a stripe may start behind base.
  unsigned char flavor = 0;
  b->Memset(0, 0, b->size(), use_repstos);
  // NOTE(review): 'int base' vs size_t b->size() is a signed/unsigned
  // comparison, and the 'end' distribution narrows size_t 'start' to int;
  // fine for realistic buffer sizes but worth confirming.
  for (int base = kWindow; base < b->size(); base += kAdvance) {
    // Skip roughly half of the windows.
    if (std::uniform_int_distribution<int>(0, 1)(rng)) continue;
    flavor++;
    const size_t start =
        std::uniform_int_distribution<size_t>(base - kWindow, base)(rng);
    const size_t end = std::uniform_int_distribution<int>(start, base)(rng);
    b->Memset(start, flavor, 1 + end - start, use_repstos);
    if (exercise_floating_point) {
      fp.f = ReciprocatedChaos<float>(fp.f);
      fp.d = ReciprocatedChaos<double>(fp.d);
    }
  }
  return fp;
}
// Loads the word list and registers all pattern generators.
// Exits the process if no dictionary was found, since FillBufferText cannot
// operate without words.
PatternGenerators::PatternGenerators() : words_(ReadDict()) {
  if (words_.empty()) {
    LOG(ERROR) << "No word list found.";
    exit(1);
  }
  generators_.emplace_back(new FillBufferSystematic());
  generators_.emplace_back(new FillBufferRandom());
  generators_.emplace_back(new FillBufferText(words_));
  generators_.emplace_back(new FillBufferGrilledCheese());
}
// Returns one of the registered generators, chosen pseudo-randomly but
// deterministically from 'round'.
const PatternGenerator& PatternGenerators::RandomGenerator(
    uint64_t round) const {
  std::uniform_int_distribution<size_t> pick(0, generators_.size() - 1);
  std::knuth_b rng(round);
  return *generators_[pick(rng)];
}
// Runs 'generator' over 'b' for 'round', then punches 'hole' into the result.
// Returns the generator's floating point iterates.
FloatingPointResults PatternGenerators::Generate(
    const PatternGenerator& generator, const MalignBuffer::PunchedHole& hole,
    uint64_t round, MalignBuffer::CopyMethod copy_method, bool use_repstos,
    bool exercise_floating_point, MalignBuffer* b) const {
  const FloatingPointResults f = generator.Generate(
      round, copy_method, use_repstos, exercise_floating_point, b);
  b->PunchHole(hole, use_repstos);
  return f;
}
} // namespace cpu_check

123
pattern_generator.h Normal file
View File

@@ -0,0 +1,123 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_PATTERN_GENERATOR_H_
#define THIRD_PARTY_CPU_CHECK_PATTERN_GENERATOR_H_
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include "malign_buffer.h"
namespace cpu_check {
class PatternGenerators;

// Per-precision results of the optional floating point side computation
// performed while generating patterns; compared across repetitions to detect
// inconsistent FP behavior.
struct FloatingPointResults {
  bool operator==(const FloatingPointResults& other) const {
    return f == other.f && d == other.d && ld == other.ld;
  }
  bool operator!=(const FloatingPointResults& other) const {
    return f != other.f || d != other.d || ld != other.ld;
  }
  float f = 0.0;
  double d = 0.0;
  long double ld = 0.0;
};

// Interface implemented by each buffer-filling pattern generator.
class PatternGenerator {
 public:
  virtual ~PatternGenerator() {}
  // Human-readable generator name, used in logging.
  virtual std::string Name() const = 0;
  // Fills the buffer for 'round'; generators honor 'copy_method' and
  // 'use_repstos' where applicable, and iterate the FP side computation when
  // 'exercise_floating_point' is set.
  virtual FloatingPointResults Generate(uint64_t round,
                                        MalignBuffer::CopyMethod copy_method,
                                        bool use_repstos,
                                        bool exercise_floating_point,
                                        MalignBuffer*) const = 0;
};
// Fills buffer with a systematic pattern.
// Returns iterate of chaotic floating point function of 'seed', with some
// reciprocal torture.
class FillBufferSystematic : public PatternGenerator {
 public:
  std::string Name() const override { return "Systematic"; }
  FloatingPointResults Generate(uint64_t round,
                                MalignBuffer::CopyMethod copy_method,
                                bool use_repstos, bool exercise_floating_point,
                                MalignBuffer*) const override;
};

// Fills buffer with a random pattern.
// Returns iterate of chaotic floating point function of 'seed', with some
// reciprocal torture.
class FillBufferRandom : public PatternGenerator {
 public:
  std::string Name() const override { return "Random"; }
  FloatingPointResults Generate(uint64_t round,
                                MalignBuffer::CopyMethod copy_method,
                                bool use_repstos, bool exercise_floating_point,
                                MalignBuffer*) const override;
};

// Fills buffer with a compressible pattern.
// Returns iterate of chaotic floating point function of 'seed', with some
// reciprocal torture.
class FillBufferText : public PatternGenerator {
 public:
  // 'words' is held by reference and must outlive this generator.
  FillBufferText(const std::vector<std::string>& words) : words_(words) {}
  std::string Name() const override { return "Text"; }
  FloatingPointResults Generate(uint64_t round,
                                MalignBuffer::CopyMethod copy_method,
                                bool use_repstos, bool exercise_floating_point,
                                MalignBuffer*) const override;

 private:
  const std::vector<std::string>& words_;
};

// memset (conventional or rep;stos) randomly aligned, random width, randomly
// overlapped stretches of buffer. Constants aim to hit multiple times in
// cache lines and buffers. Untuned and based on nothing but hunches.
class FillBufferGrilledCheese : public PatternGenerator {
 public:
  std::string Name() const override { return "Cheese"; }
  FloatingPointResults Generate(uint64_t round,
                                MalignBuffer::CopyMethod copy_method,
                                bool use_repstos, bool exercise_floating_point,
                                MalignBuffer*) const override;
};
// Owns the registered set of PatternGenerators plus the shared word list.
class PatternGenerators {
 public:
  PatternGenerators();
  // Returns a generator chosen pseudo-randomly (deterministic in 'round').
  const PatternGenerator& RandomGenerator(uint64_t round) const;
  // Runs 'generator' for 'round' over 'b', then punches 'hole'.
  FloatingPointResults Generate(const PatternGenerator& generator,
                                const MalignBuffer::PunchedHole& hole,
                                uint64_t round,
                                MalignBuffer::CopyMethod copy_method,
                                bool use_repstos, bool exercise_floating_point,
                                MalignBuffer* b) const;
  const std::vector<std::string>& words() const { return words_; }

 private:
  const std::vector<std::string> words_;
  std::vector<std::unique_ptr<PatternGenerator>> generators_;
};
} // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_PATTERN_GENERATOR_H_

91
silkscreen.cc Normal file
View File

@@ -0,0 +1,91 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "silkscreen.h"
#include <unistd.h>
#include <random>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "utils.h"
namespace cpu_check {
// System page size, determined once at startup.
static const size_t kPageSize = sysconf(_SC_PAGESIZE);

// Randomly assigns each of the kSize byte-wide slots to one tid from
// 'tid_list' and records per-tid slot counts. Uses a default-constructed
// engine, so every instance produces the same assignment.
Silkscreen::Silkscreen(const std::vector<int> &tid_list)
    : buffer_address_(static_cast<char *>(aligned_alloc(
          kPageSize, kPageSize * ((kSize + kPageSize - 1) / kPageSize)))) {
  // NOTE(review): aligned_alloc result is not checked for nullptr -- confirm
  // that crashing on first write is the intended failure mode.
  std::knuth_b rng;
  std::uniform_int_distribution<size_t> dist(0, tid_list.size() - 1);
  for (size_t k = 0; k < kSize; k++) {
    size_t w = dist(rng);
    const int o = tid_list[w];
    slot_count_[o]++;
    // owner_ stores uint16_t; assumes tids fit in 16 bits -- TODO confirm.
    owner_.push_back(o);
  }
}
// Writes the low byte of 'round' into every slot owned by 'tid'.
// Returns an internal-error Status (JSON message) if the number of slots
// written disagrees with the recorded ownership count.
absl::Status Silkscreen::WriteMySlots(int tid, uint64_t round) {
  uint64_t written = 0;
  for (size_t slot = 0; slot < kSize; slot++) {
    if (owner(slot) != tid) continue;
    *data(slot) = static_cast<char>(round);
    written++;
  }
  if (written == slot_count_[tid]) return absl::OkStatus();
  return absl::Status(
      absl::StatusCode::kInternal,
      absl::StrCat(Json("written", written), ", ",
                   Json("expected", slot_count_[tid])));
}
// When Silkscreen fails, it often fails, on several bad machines,
// in a surprising way: all slots owned by reading tid are
// corrupt, in a way that suggests the previous round's
// writes never happened. Weird, and deserves some study, but
// meanwhile the log spew is suppressed by reporting only the last
// error and the error count.
absl::Status Silkscreen::CheckMySlots(int tid, uint64_t round) const {
  const char expected = static_cast<char>(round);
  uint64_t slots_read = 0;
  uint64_t error_count = 0;
  std::string last_error;
  for (size_t k = 0; k < Silkscreen::kSize; k++) {
    if (owner(k) != tid) continue;
    slots_read++;
    const char v = *data(k);
    if (v == expected) continue;
    // Remember only the most recent mismatch (see comment above).
    error_count++;
    last_error = absl::StrCat(Json("position", k), ", ", Json("is", v), ", ",
                              Json("expected", expected));
  }
  // A count mismatch indicates corrupted ownership bookkeeping.
  if (slot_count(tid) != slots_read) {
    last_error = absl::StrCat(Json("read", slots_read), ", ",
                              Json("expected", slot_count(tid)));
    error_count++;
  }
  if (error_count > 0) {
    return absl::Status(
        absl::StatusCode::kInternal,
        absl::StrCat(last_error, ", ", Json("errors", error_count)));
  } else {
    return absl::OkStatus();
  }
}
} // namespace cpu_check

63
silkscreen.h Normal file
View File

@@ -0,0 +1,63 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <map>
#include <vector>
#include "absl/status/status.h"
#ifndef THIRD_PARTY_CPU_CHECK_SILKSCREEN_H_
#define THIRD_PARTY_CPU_CHECK_SILKSCREEN_H_
namespace cpu_check {
// Rudimentary coherence/uncore tester.
// Randomly assigns each slot of a seemingly shared buffer to a single tid,
// creating only "false sharing".
// Thus each slot, regardless of alignment, must obey program order unless the
// machine is broken.
// To be toughened, e.g.:
// Widen the slots a bit
// Control the sharing more tightly, e.g. each cache line split between 2 tids
// Maybe checksum the indices to distinguish core-local compute errors from
// coherence errors, but that's perhaps easier said than done effectively.
// As it stands, it may be particularly hard to localize failures. Though that's
// always going to be a bit hard, which is the point. One technique might be
// to leave this alone and to run on subsets of cores and sockets.
class Silkscreen {
 public:
  static constexpr size_t kSize = 1000 * 1000;  // Size of buffer
  Silkscreen(const std::vector<int>& tid_list);
  ~Silkscreen() { free(buffer_address_); }
  // Writes value derived from 'round' into all slots owned by 'tid'.
  // Returns non-OK Status with JSON-formatted message upon error.
  absl::Status WriteMySlots(int tid, uint64_t round);
  // Checks all slots owned by 'tid' for value appropriate to 'round'.
  // Returns non-OK Status with JSON-formatted message upon error.
  absl::Status CheckMySlots(int tid, uint64_t round) const;

 private:
  // Owning tid of slot 'k'.
  int owner(size_t k) const { return owner_[k]; }
  size_t size() const { return owner_.size(); }
  // Slot count for 'owner'; map::at throws for a tid never assigned a slot.
  int slot_count(int owner) const { return slot_count_.at(owner); }
  const char* data(size_t k) const { return buffer_address_ + k; }
  char* data(size_t k) { return buffer_address_ + k; }

  // NOTE(review): owners are stored as uint16_t while tids are int --
  // assumes tids fit in 16 bits; confirm.
  std::vector<uint16_t> owner_;    // const after initialization
  std::map<int, int> slot_count_;  // const after initialization
  char* const buffer_address_;
};
} // namespace cpu_check
#endif // THIRD_PARTY_CPU_CHECK_SILKSCREEN_H_

58
stopper.h Normal file
View File

@@ -0,0 +1,58 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef THIRD_PARTY_CPU_CHECK_STOPPER_H_
#define THIRD_PARTY_CPU_CHECK_STOPPER_H_
#include <unistd.h>
#include <atomic>
#include <cmath>
#include "utils.h"
// Cooperative run-time limiter: workers poll Expired() and sleep via
// BoundedSleep(); Stop() forces immediate expiry.
class Stopper {
 public:
  // Infinite timeout if 'timeout' <= 0.
  // NOTE(review): uses std::numeric_limits without directly including
  // <limits> -- relies on a transitive include; confirm.
  Stopper(int timeout)
      : t_stop_(timeout <= 0 ? std::numeric_limits<double>::infinity()
                             : TimeInSeconds() + timeout) {}
  // Returns true if time has expired or Stop has been invoked.
  // Thread safe.
  bool Expired() const { return stopped_ || TimeInSeconds() > t_stop_; }
  // Sleeps for the minimum of 't' and remaining run time.
  // Thread safe.
  void BoundedSleep(int t) const {
    if (std::isinf(t_stop_)) {
      // No deadline: sleep the full interval.
      sleep(t);
    } else {
      const double remaining = t_stop_ - TimeInSeconds();
      if (!stopped_ && remaining > 0) {
        sleep(std::min<int>(t, ceil(remaining)));
      }
    }
  }
  // Causes timeout to expire now.
  // Thread safe.
  void Stop() { stopped_ = true; }

 private:
  const double t_stop_;  // Absolute deadline (seconds); +inf if no timeout.
  std::atomic_bool stopped_ = false;
};
#endif // THIRD_PARTY_CPU_CHECK_STOPPER_H_

View File

@@ -18,6 +18,7 @@
#include <unistd.h>
#include "log.h"
#include "absl/strings/str_cat.h"
static const std::string host_name = []() {
char host[256];
@@ -34,6 +35,21 @@ double TimeInSeconds() {
return ((tv.tv_sec * 1e6) + tv.tv_usec) / 1e6;
}
// Hex-encodes 'l' bytes starting at 's' as lowercase hex, two characters per
// byte.
std::string HexData(const char* s, uint32_t l) {
  static const char kDigits[] = "0123456789abcdef";
  std::string out(static_cast<size_t>(l) * 2, '\0');
  for (uint32_t i = 0; i < l; i++) {
    const uint8_t byte = static_cast<uint8_t>(s[i]);
    out[2 * i] = kDigits[byte >> 4];
    out[2 * i + 1] = kDigits[byte & 0xf];
  }
  return out;
}
// Hex-encodes every byte of 's' (lowercase, two characters per byte).
std::string HexStr(const std::string& s) { return HexData(s.data(), s.size()); }
// Renders an int-valued JSON field: "field": v
std::string Json(const std::string& field, int v) {
  std::string out = "\"";
  out += field;
  out += "\": ";
  out += std::to_string(v);
  return out;
}
@@ -50,12 +66,12 @@ std::string JsonBool(const std::string& field, bool v) {
return "\"" + field + "\": " + (v ? "true" : "false");
}
std::string Json(const std::string& field, const std::string& v) {
return "\"" + field + "\": \"" + v + "\"";
std::string Json(const std::string& field, absl::string_view v) {
return absl::StrCat("\"", field, "\": \"", v, "\"");
}
std::string JsonRecord(const std::string& name, const std::string& v) {
return "\"" + name + "\": { " + v + " }";
std::string JsonRecord(const std::string& name, absl::string_view v) {
return absl::StrCat("\"", name, "\": { ", v, " }");
}
// Emits null field.

View File

@@ -17,15 +17,19 @@
#include <string>
#include "absl/strings/string_view.h"
double TimeInSeconds();
std::string HexData(const char* s, uint32_t l);
std::string HexStr(const std::string& s);
std::string Json(const std::string& field, int v);
std::string Json(const std::string& field, uint64_t v);
std::string Json(const std::string& field, double v);
std::string JsonBool(const std::string& field, bool v);
std::string Json(const std::string& field, const std::string& v);
std::string JsonRecord(const std::string& name, const std::string& v);
std::string Json(const std::string& field, absl::string_view v);
std::string JsonRecord(const std::string& name, absl::string_view v);
// Emits null field.
std::string JsonNull(const std::string& field);