#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSSAUpdater.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#define X86_CACHELINE_SIZE (64L)
using namespace llvm;
#define PASS_KEY "x86-strstr"
#define DEBUG_TYPE PASS_KEY
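// X86StoreStore: speculation-hardening instrumentation. For every qualifying
// store (or, with -x86-loadtoleak, every qualifying load) and for every
// return, the pass materializes an address derived from the accessed location
// into R15, masks it down to a page offset, and performs two writes of a
// marker value into "__llvm_store_store_shadow_area", separated by an SFENCE
// and shifted apart by one cache line. Calls are visited as well, but their
// instrumentation is currently disabled (see visitCall). The surrounding
// flags describe this as a mitigation for LVI-style attacks
// (-x86-lviprotect). A small read/write scratch block is also mapped when the
// pass object is constructed; it is allocated but not otherwise referenced in
// this file.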
static sys::MemoryBlock SpeculativeMemoryEraserBlock;
static cl::opt<bool> StoreToLeak(
"x86-loadtoleak", cl::Hidden,
cl::desc("X86: Load-to-Leak hardening; otherwise Store-To-Leak."), cl::init(false));
static cl::opt<bool> LVI_mitigation(
"x86-lviprotect", cl::Hidden,
cl::desc("X86: Mitigate against LVI attack"), cl::init(false));
namespace {
class X86StoreStore : public MachineFunctionPass {
public:
const X86Subtarget *Subtarget;
MachineRegisterInfo *MRI;
const X86InstrInfo *TII;
const TargetRegisterInfo *TRI;
X86StoreStore() : MachineFunctionPass(ID) {
  std::error_code EC;
  SpeculativeMemoryEraserBlock = sys::Memory::allocateMappedMemory(
      4096, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC)
    report_fatal_error("X86StoreStore: failed to allocate the speculative-memory scratch block");
  // vaddr_base = SectionMemoryManager::allocateSection(SectionMemoryManager::AllocationPurpose::RWData, 4096, 4096);
}
StringRef getPassName() const override {
  return "X86 store-store hardening";
}
bool runOnMachineFunction(MachineFunction &MF) override;
/// Pass identification, replacement for typeid.
static char ID;
uint64_t vaddr_base;
void visitMovInstr(MachineInstr* MI, MachineFunction &MF);
void visitMovInstr2(MachineInstr* MI, MachineFunction &MF);
void visitCall(MachineInstr* MI, MachineFunction &MF);
void visitReturn(MachineInstr* MI, MachineFunction &MF);
bool mayReadMemory(const MachineInstr& MI){
return MI.mayLoad() && (MI.isMoveImmediate() || MI.isMoveReg() || isStackLoad(MI)); //|| isPush(MI) || MI.isInlineAsm();
}
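/// Returns true for the stores this pass instruments: move instructions that
/// may write memory, plus PUSH-family stack stores.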
bool mayModifyMemory(const MachineInstr& MI){
return MI.mayStore() && (MI.isMoveImmediate() || MI.isMoveReg() || isStackStore(MI)); //|| isPush(MI) || MI.isInlineAsm();
}
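/// Returns true for PUSH-family opcodes, i.e. instructions that implicitly
/// store through RSP.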
bool isStackStore(const MachineInstr& MI){
switch(MI.getOpcode()){
case X86::PUSH16i8:
case X86::PUSH16r:
case X86::PUSH16rmm:
case X86::PUSH16rmr:
case X86::PUSH32i8:
case X86::PUSH32r:
case X86::PUSH32rmm:
case X86::PUSH32rmr:
case X86::PUSH64i32:
case X86::PUSH64i8:
case X86::PUSH64r:
case X86::PUSH64rmm:
case X86::PUSH64rmr:
case X86::PUSHA16:
case X86::PUSHA32:
case X86::PUSHCS16:
case X86::PUSHCS32:
case X86::PUSHDS16:
case X86::PUSHDS32:
case X86::PUSHES16:
case X86::PUSHES32:
case X86::PUSHF16:
case X86::PUSHF32:
case X86::PUSHF64:
case X86::PUSHFS16:
case X86::PUSHFS32:
case X86::PUSHFS64:
case X86::PUSHGS16:
case X86::PUSHGS32:
case X86::PUSHGS64:
case X86::PUSHSS16:
case X86::PUSHSS32:
case X86::PUSHi16:
case X86::PUSHi32:
return true;
default:
return false;
}
}
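/// Returns true for POP-family opcodes, i.e. instructions that implicitly
/// load through RSP.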
bool isStackLoad(const MachineInstr& MI){
switch(MI.getOpcode()){
case X86::POP16r:
case X86::POP16rmm:
case X86::POP16rmr:
case X86::POP32r:
case X86::POP32rmm:
case X86::POP32rmr:
case X86::POP64r:
case X86::POP64rmm:
case X86::POP64rmr:
case X86::POPA16:
case X86::POPA32:
case X86::POPDS16:
case X86::POPDS32:
case X86::POPES16:
case X86::POPES32:
case X86::POPF16:
case X86::POPF32:
case X86::POPF64:
case X86::POPFS16:
case X86::POPFS32:
case X86::POPFS64:
case X86::POPGS16:
case X86::POPGS32:
case X86::POPGS64:
case X86::POPSS16:
case X86::POPSS32:
return true;
default:
return false;
}
}
};
} // end anonymous namespace
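// Configuration hooks. By default the pass instruments stores
// (visitMovInstr / mayModifyMemory / isStackStore); runOnMachineFunction
// switches these pointers to the load variants when -x86-loadtoleak is set.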
static void (X86StoreStore::*instrumentation)(MachineInstr* MI, MachineFunction &MF) = &X86StoreStore::visitMovInstr;
static bool (X86StoreStore::*needsInstrumentation)(const MachineInstr& MI) = &X86StoreStore::mayModifyMemory;
static bool (X86StoreStore::*isStackInstr)(const MachineInstr &MI) = &X86StoreStore::isStackStore;
char X86StoreStore::ID = 0;
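// Entry point: bail out unless -x86-lviprotect is set, pick the store or load
// instrumentation, collect candidate instructions into a worklist, and then
// instrument them. The worklist is built first so that the instructions
// inserted below do not invalidate the block iteration.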
bool X86StoreStore::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                    << " **********\n");
  if (!LVIMitigation)
    return false;
  Subtarget = &MF.getSubtarget<X86Subtarget>();
  TII = Subtarget->getInstrInfo();
  MRI = &MF.getRegInfo();
  if (LoadToLeak) {
    instrumentation = &X86StoreStore::visitMovInstr2;
    needsInstrumentation = &X86StoreStore::mayReadMemory;
    isStackInstr = &X86StoreStore::isStackLoad;
  }
  // BuildMI(*MF.begin(), (*MF.begin()).begin(), DebugLoc(), TII->get(X86::XCHG64rm), UndefReg)
  //     .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area"); // function starts with lfence
  std::vector<MachineInstr *> WorkList;
  for (auto &MBB : MF)
    for (auto &MI : MBB)
      if ((this->*needsInstrumentation)(MI) || MI.isReturn() || MI.isCall())
        WorkList.push_back(&MI);
  for (auto *MI : WorkList) {
    if (MI->isCall())
      visitCall(MI, MF);
    else if (MI->isReturn())
      visitReturn(MI, MF);
    else
      (this->*instrumentation)(MI, MF);
    // BuildMI(*(MI->getParent()), std::next(MI->getIterator()), DebugLoc(), TII->get(X86::LFENCE));
  }
  return !WorkList.empty();
}
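// Call-site instrumentation. The sequence below mirrors visitMovInstr but is
// inserted before the call and uses a second shadow symbol for the post-fence
// write; it is currently disabled by the early return at the top of the
// function.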
void X86StoreStore::visitCall(MachineInstr *MI, MachineFunction &MF) {
  int MemOpOffset = X86II::getMemoryOperandNo(MI->getDesc().TSFlags);
  unsigned Bias = X86II::getOperandBias(MI->getDesc());
  Register UndefReg = X86::R15; // MRI->createVirtualRegister(&X86::GR64RegClass);
  // Call instrumentation is currently disabled; the sequence below is kept for
  // reference but never emitted.
  return;
  if (MemOpOffset < 0)
    return; // Not a memory-indirect call; leave it alone.
  MemOpOffset += Bias;
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
      .addReg(MI->getOperand(MemOpOffset + X86::AddrBaseReg).getReg())
      .addImm(MI->getOperand(MemOpOffset + X86::AddrScaleAmt).getImm())
      .addReg(MI->getOperand(MemOpOffset + X86::AddrIndexReg).getReg())
      .addImm(MI->getOperand(MemOpOffset + X86::AddrDisp).getImm())
      .addReg(MI->getOperand(MemOpOffset + X86::AddrSegmentReg).getReg());
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::SFENCE));
  // Second write must not trigger page aliasing
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::ADD64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(X86_CACHELINE_SIZE);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area2")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
}
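// Return instrumentation. Returns carry no explicit memory operand, so the
// shadow address is derived from RSP - 8, the stack slot adjacent to the
// return address.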
void X86StoreStore::visitReturn(MachineInstr *MI, MachineFunction &MF) {
  Register UndefReg = X86::R15; // MRI->createVirtualRegister(&X86::GR64RegClass);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
      .addReg(X86::RSP)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addImm(/* disp */ -8)
      .addReg(/* segment */ 0);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::SFENCE));
  // Second write must not trigger page aliasing
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::ADD64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(X86_CACHELINE_SIZE);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
}
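// Store instrumentation, inserted *after* the store; the insertion point is
// chained through MIB. For a store such as `movq %rax, 16(%rbx)`, the emitted
// sequence is roughly (with R15 as the scratch register and <marker> standing
// in for the 0xFFFFFFFF immediate):
//
//   leaq   16(%rbx), %r15
//   andq   $0xfff, %r15      # keep only the page offset
//   movq   $<marker>, __llvm_store_store_shadow_area(%r15)
//   sfence
//   addq   $64, %r15         # step by one cache line
//   andq   $0xfff, %r15
//   movq   $<marker>, __llvm_store_store_shadow_area(%r15)
//
// PUSH-family stores have no explicit memory operand, so their address is
// taken as RSP - 8 instead.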
void X86StoreStore::visitMovInstr(MachineInstr *MI, MachineFunction &MF) {
  int MemOpOffset = X86II::getMemoryOperandNo(MI->getDesc().TSFlags);
  unsigned Bias = X86II::getOperandBias(MI->getDesc());
  Register UndefReg = X86::R15; // MRI->createVirtualRegister(&X86::GR64RegClass);
  MachineInstrBuilder MIB;
  bool IsStack = (this->*isStackInstr)(*MI);
  if (!IsStack && MemOpOffset < 0)
    return; // No explicit memory operand to derive an address from.
  MemOpOffset += Bias;
  if (!IsStack) {
    MIB = BuildMI(*(MI->getParent()), std::next(MI->getIterator()), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
              .addReg(MI->getOperand(MemOpOffset + X86::AddrBaseReg).getReg())
              .addImm(MI->getOperand(MemOpOffset + X86::AddrScaleAmt).getImm())
              .addReg(MI->getOperand(MemOpOffset + X86::AddrIndexReg).getReg())
              .addImm(MI->getOperand(MemOpOffset + X86::AddrDisp).getImm())
              .addReg(MI->getOperand(MemOpOffset + X86::AddrSegmentReg).getReg());
  } else {
    MIB = BuildMI(*(MI->getParent()), std::next(MI->getIterator()), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
              .addReg(X86::RSP)
              .addImm(/* scale */ 1)
              .addReg(/* index */ 0)
              .addImm(/* disp */ -8)
              .addReg(/* segment */ 0);
  }
  MIB = BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
            .addReg(UndefReg)
            .addImm(0xFFF);
  MIB = BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::MOV64mi32))
            .addReg(/* base */ UndefReg)
            .addImm(/* scale */ 1)
            .addReg(/* index */ 0)
            .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
            .addReg(/* segment */ 0)
            .addImm(0xFFFFFFFF);
  MIB = BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::SFENCE));
  // Second write must not trigger page aliasing
  MIB = BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::ADD64ri32), UndefReg)
            .addReg(UndefReg)
            .addImm(X86_CACHELINE_SIZE);
  MIB = BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
            .addReg(UndefReg)
            .addImm(0xFFF);
  BuildMI(*(MI->getParent()), std::next(MIB.getInstr()->getIterator()), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
}
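// Load instrumentation (the -x86-loadtoleak variant): the same shadow-write
// sequence as visitMovInstr, but every instruction is inserted *before* the
// load rather than after it.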
void X86StoreStore::visitMovInstr2(MachineInstr *MI, MachineFunction &MF) {
  int MemOpOffset = X86II::getMemoryOperandNo(MI->getDesc().TSFlags);
  unsigned Bias = X86II::getOperandBias(MI->getDesc());
  Register UndefReg = X86::R15; // MRI->createVirtualRegister(&X86::GR64RegClass);
  bool IsStack = (this->*isStackInstr)(*MI);
  if (!IsStack && MemOpOffset < 0)
    return; // No explicit memory operand to derive an address from.
  MemOpOffset += Bias;
  if (!IsStack) {
    BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
        .addReg(MI->getOperand(MemOpOffset + X86::AddrBaseReg).getReg())
        .addImm(MI->getOperand(MemOpOffset + X86::AddrScaleAmt).getImm())
        .addReg(MI->getOperand(MemOpOffset + X86::AddrIndexReg).getReg())
        .addImm(MI->getOperand(MemOpOffset + X86::AddrDisp).getImm())
        .addReg(MI->getOperand(MemOpOffset + X86::AddrSegmentReg).getReg());
  } else {
    BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::LEA64r), UndefReg)
        .addReg(X86::RSP)
        .addImm(/* scale */ 1)
        .addReg(/* index */ 0)
        .addImm(/* disp */ -8)
        .addReg(/* segment */ 0);
  }
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::SFENCE));
  // Second write must not trigger page aliasing
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::ADD64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(X86_CACHELINE_SIZE);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::AND64ri32), UndefReg)
      .addReg(UndefReg)
      .addImm(0xFFF);
  BuildMI(*(MI->getParent()), MI->getIterator(), DebugLoc(), TII->get(X86::MOV64mi32))
      .addReg(/* base */ UndefReg)
      .addImm(/* scale */ 1)
      .addReg(/* index */ 0)
      .addExternalSymbol(/* disp */ "__llvm_store_store_shadow_area")
      .addReg(/* segment */ 0)
      .addImm(0xFFFFFFFF);
}
INITIALIZE_PASS_BEGIN(X86StoreStore, PASS_KEY, "X86 store-store hardening",
                      false, false)
INITIALIZE_PASS_END(X86StoreStore, PASS_KEY, "X86 store-store hardening",
                    false, false)
FunctionPass *llvm::createX86StoreStorePass() {
return new X86StoreStore();
}