Diffstat (limited to 'lib/CodeGen/SelectionDAG/InstrEmitter.cpp')
 lib/CodeGen/SelectionDAG/InstrEmitter.cpp | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
index 5927838b533..4d09c444945 100644
--- a/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
+++ b/lib/CodeGen/SelectionDAG/InstrEmitter.cpp
@@ -895,6 +895,7 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
 
     // Remember to operand index of the group flags.
     SmallVector<unsigned, 8> GroupIdx;
+    unsigned PrevDefGroup = 0;
 
     // Add all of the operand registers to the instruction.
     for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
@@ -943,9 +944,20 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned,
       if (InlineAsm::getKind(Flags) == InlineAsm::Kind_RegUse) {
         unsigned DefGroup = 0;
         if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) {
+          // Check that the def groups are monotonically increasing.
+          // Otherwise, the tied uses and defs won't line up, and
+          // MI::findTiedOperandIdx() will find the wrong operand. This
+          // should be automatically enforced by the front ends when
+          // translating "+" constraints into tied def+use pairs.
+          assert(DefGroup >= PrevDefGroup &&
+                 "Tied inline asm operands must be in increasing order.");
+          PrevDefGroup = DefGroup;
+
           unsigned DefIdx = GroupIdx[DefGroup] + 1;
           unsigned UseIdx = GroupIdx.back() + 1;
           for (unsigned j = 0; j != NumVals; ++j) {
+            assert(!MI->getOperand(DefIdx + j).isTied() &&
+                   "Def is already tied to another use");
             MI->getOperand(DefIdx + j).setIsTied();
             MI->getOperand(UseIdx + j).setIsTied();
           }
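
The ordering invariant enforced by the new assert(DefGroup >= PrevDefGroup && ...)
can be illustrated in isolation. The sketch below is plain C++ and not the LLVM
API: OperandGroup and tiedDefsAreMonotonic are hypothetical names, and the flat
group list is a simplification of the MachineInstr operand layout. It mirrors
the left-to-right scan in EmitSpecialNode: each tied use must reference a def
group no earlier than the one referenced by the previous tied use, since the
def/use pairing is later recovered purely from operand order.

#include <cassert>
#include <vector>

// One inline asm operand group, reduced to the two facts the check needs
// (hypothetical type, standing in for the flag word plus its registers).
struct OperandGroup {
  bool IsTiedUse;    // is this a register use tied to an earlier def?
  unsigned DefGroup; // if so, index of the def group it is tied to
};

// Scan groups left to right, as the loop in EmitSpecialNode does: the def
// groups referenced by tied uses must be monotonically increasing, or the
// tied def/use pairs stop lining up by position.
bool tiedDefsAreMonotonic(const std::vector<OperandGroup> &Groups) {
  unsigned PrevDefGroup = 0;
  for (const OperandGroup &G : Groups) {
    if (!G.IsTiedUse)
      continue;
    if (G.DefGroup < PrevDefGroup)
      return false; // out of order: pairing by position would be wrong
    PrevDefGroup = G.DefGroup;
  }
  return true;
}

int main() {
  // Two "+" constraints lowered as def groups 0 and 1, followed by their
  // tied uses in the same order: monotonic, so the invariant holds.
  std::vector<OperandGroup> InOrder = {
      {false, 0}, {false, 0}, {true, 0}, {true, 1}};
  assert(tiedDefsAreMonotonic(InOrder));

  // The same uses tied in reverse (group 1 before group 0) violate it;
  // this is the case the new assertion catches in +Asserts builds.
  std::vector<OperandGroup> OutOfOrder = {
      {false, 0}, {false, 0}, {true, 1}, {true, 0}};
  assert(!tiedDefsAreMonotonic(OutOfOrder));
  return 0;
}

The second new assertion, on !MI->getOperand(DefIdx + j).isTied(), guards the
complementary failure mode: two different uses claiming the same def.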