From ae43cab6bab0e5bcdbe2971bf718712559625e39 Mon Sep 17 00:00:00 2001 From: Alp Toker Date: Fri, 24 Jan 2014 17:20:08 +0000 Subject: Fix known typos Sweep the codebase for common typos. Includes some changes to visible function names that were misspelt. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@200018 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm-c/IRReader.h | 2 +- include/llvm/CodeGen/ScheduleDAGInstrs.h | 2 +- include/llvm/CodeGen/TargetSchedule.h | 2 +- include/llvm/Support/Compression.h | 10 +-- include/llvm/Support/Memory.h | 2 +- .../llvm/Transforms/Utils/ASanStackFrameLayout.h | 2 +- lib/Analysis/BasicAliasAnalysis.cpp | 2 +- lib/Analysis/DependenceAnalysis.cpp | 2 +- lib/Analysis/ScalarEvolution.cpp | 8 +-- lib/CodeGen/CriticalAntiDepBreaker.cpp | 2 +- lib/CodeGen/MachineScheduler.cpp | 2 +- lib/CodeGen/ScheduleDAG.cpp | 2 +- lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 6 +- lib/CodeGen/StackMaps.cpp | 2 +- lib/ExecutionEngine/IntelJITEvents/jitprofiling.h | 14 ++-- lib/ExecutionEngine/Interpreter/Execution.cpp | 2 +- lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp | 2 +- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h | 2 +- lib/MC/MCParser/AsmParser.cpp | 8 +-- lib/Support/APFloat.cpp | 2 +- lib/Support/APInt.cpp | 2 +- lib/Support/CommandLine.cpp | 19 +++--- lib/Support/Path.cpp | 6 +- lib/Support/regcomp.c | 6 +- lib/Target/AArch64/AArch64InstrInfo.cpp | 3 +- lib/Target/AArch64/AArch64InstrNEON.td | 2 +- .../AArch64/Disassembler/AArch64Disassembler.cpp | 2 +- lib/Target/ARM/ARMISelDAGToDAG.cpp | 2 +- lib/Target/ARM/ARMISelLowering.cpp | 4 +- lib/Target/ARM/ARMRegisterInfo.td | 2 +- lib/Target/ARM/ARMScheduleSwift.td | 4 +- lib/Target/ARM/ARMTargetTransformInfo.cpp | 2 +- lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h | 3 +- lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp | 2 +- lib/Target/Hexagon/HexagonCopyToCombine.cpp | 4 +- lib/Target/Hexagon/HexagonHardwareLoops.cpp | 4 +- lib/Target/Hexagon/HexagonInstrInfo.cpp | 4 +- lib/Target/Mips/AsmParser/MipsAsmParser.cpp | 4 +- lib/Target/Mips/MipsMSAInstrInfo.td | 2 +- lib/Target/NVPTX/NVPTXISelLowering.cpp | 6 +- lib/Target/NVPTX/NVVMReflect.cpp | 4 +- lib/Target/PowerPC/PPCISelLowering.cpp | 2 +- lib/Target/R600/AMDGPU.h | 2 +- lib/Target/R600/AMDILCFGStructurizer.cpp | 2 +- lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp | 2 +- lib/Target/R600/R600ClauseMergePass.cpp | 2 +- lib/Target/R600/R600Defines.h | 2 +- lib/Target/R600/R600ISelLowering.cpp | 6 +- lib/Target/R600/R600ISelLowering.h | 2 +- lib/Target/R600/R600InstrInfo.h | 2 +- lib/Target/R600/R600Instructions.td | 2 +- lib/Target/R600/R600Packetizer.cpp | 2 +- lib/Target/R600/SIISelLowering.cpp | 2 +- lib/Target/R600/SIRegisterInfo.cpp | 2 +- lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h | 2 +- lib/Target/SystemZ/SystemZISelLowering.cpp | 2 +- lib/Target/SystemZ/SystemZInstrInfo.cpp | 2 +- lib/Target/SystemZ/SystemZInstrInfo.td | 2 +- .../X86/Disassembler/X86DisassemblerDecoder.h | 2 +- lib/Target/X86/X86FastISel.cpp | 2 +- lib/Target/X86/X86Schedule.td | 2 +- lib/Target/XCore/XCoreLowerThreadLocal.cpp | 4 +- lib/Transforms/IPO/ConstantMerge.cpp | 6 +- lib/Transforms/IPO/MergeFunctions.cpp | 2 +- lib/Transforms/InstCombine/InstCombineCalls.cpp | 4 +- lib/Transforms/InstCombine/InstCombineCasts.cpp | 4 +- lib/Transforms/InstCombine/InstCombineCompares.cpp | 4 +- .../InstCombine/InstCombineVectorOps.cpp | 2 +- .../InstCombine/InstructionCombining.cpp | 4 +- .../Instrumentation/AddressSanitizer.cpp | 2 +- 
lib/Transforms/Instrumentation/MemorySanitizer.cpp | 2 +- lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 2 +- lib/Transforms/ObjCARC/ObjCARCOpts.cpp | 14 ++-- lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 6 +- lib/Transforms/Utils/FlattenCFG.cpp | 2 +- lib/Transforms/Utils/SimplifyCFG.cpp | 6 +- lib/Transforms/Vectorize/LoopVectorize.cpp | 6 +- lib/Transforms/Vectorize/SLPVectorizer.cpp | 4 +- .../ScalarEvolution/2012-05-18-LoopPredRecurse.ll | 2 +- test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll | 18 +++--- test/Bindings/Ocaml/vmcore.ml | 20 +++--- test/CodeGen/ARM/struct_byval_arm_t1_t2.ll | 2 +- test/CodeGen/MSP430/misched-msp430.ll | 2 +- .../Mips/msa/llvm-stress-s449609655-simplified.ll | 2 +- test/CodeGen/SystemZ/fp-cmp-04.ll | 2 +- test/CodeGen/SystemZ/frame-08.ll | 4 +- test/CodeGen/SystemZ/int-cmp-44.ll | 2 +- test/CodeGen/SystemZ/int-cmp-45.ll | 2 +- .../X86/2013-10-14-FastISel-incorrect-vreg.ll | 8 +-- test/CodeGen/X86/block-placement.ll | 2 +- test/CodeGen/X86/load-slice.ll | 2 +- test/CodeGen/X86/shl_undef.ll | 2 +- test/DebugInfo/X86/tls.ll | 2 +- test/MC/ARM/eh-directive-setfp.s | 2 +- test/MC/ARM/elf-thumbfunc-reloc.ll | 2 +- test/MC/COFF/bss.s | 2 +- test/MC/COFF/section-name-encoding.s | 2 +- test/MC/MachO/bss.s | 2 +- test/Transforms/Inline/ptr-diff.ll | 2 +- test/Transforms/LoopVectorize/value-ptr-bug.ll | 2 +- test/Transforms/ObjCARC/allocas.ll | 4 +- tools/lli/RemoteMemoryManager.cpp | 2 +- tools/lli/RemoteTarget.h | 2 +- tools/lli/RemoteTargetExternal.h | 2 +- tools/llvm-objdump/COFFDump.cpp | 2 +- unittests/Support/FileOutputBufferTest.cpp | 2 +- unittests/Support/SwapByteOrderTest.cpp | 74 +++++++++++----------- utils/TableGen/CodeGenRegisters.h | 2 +- utils/TableGen/CodeGenSchedule.cpp | 2 +- utils/TableGen/CodeGenSchedule.h | 4 +- utils/wciia.py | 2 +- 111 files changed, 233 insertions(+), 232 deletions(-) diff --git a/include/llvm-c/IRReader.h b/include/llvm-c/IRReader.h index d0a23be0abf..5001afb7ed7 100644 --- a/include/llvm-c/IRReader.h +++ b/include/llvm-c/IRReader.h @@ -24,7 +24,7 @@ extern "C" { * Read LLVM IR from a memory buffer and convert it into an in-memory Module * object. Returns 0 on success. * Optionally returns a human-readable description of any errors that - * occured during parsing IR. OutMessage must be disposed with + * occurred during parsing IR. OutMessage must be disposed with * LLVMDisposeMessage. * * @see llvm::ParseIR() diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h index 4ae22ad2afe..378ac7b2b6d 100644 --- a/include/llvm/CodeGen/ScheduleDAGInstrs.h +++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h @@ -94,7 +94,7 @@ namespace llvm { /// The standard DAG builder does not normally include terminators as DAG /// nodes because it does not create the necessary dependencies to prevent - /// reordering. A specialized scheduler can overide + /// reordering. A specialized scheduler can override /// TargetInstrInfo::isSchedulingBoundary then enable this flag to indicate /// it has taken responsibility for scheduling the terminator correctly. bool CanHandleTerminators; diff --git a/include/llvm/CodeGen/TargetSchedule.h b/include/llvm/CodeGen/TargetSchedule.h index 19a172beeaa..4e178d057bb 100644 --- a/include/llvm/CodeGen/TargetSchedule.h +++ b/include/llvm/CodeGen/TargetSchedule.h @@ -158,7 +158,7 @@ public: /// model. /// /// Compute and return the expected latency of this instruction independent of - /// a particular use. 
computeOperandLatency is the prefered API, but this is + /// a particular use. computeOperandLatency is the preferred API, but this is /// occasionally useful to help estimate instruction cost. /// /// If UseDefaultDefLatency is false and no new machine sched model is diff --git a/include/llvm/Support/Compression.h b/include/llvm/Support/Compression.h index bef9146d07d..ea3962ecd9f 100644 --- a/include/llvm/Support/Compression.h +++ b/include/llvm/Support/Compression.h @@ -33,11 +33,11 @@ enum CompressionLevel { enum Status { StatusOK, - StatusUnsupported, // zlib is unavaliable - StatusOutOfMemory, // there was not enough memory - StatusBufferTooShort, // there was not enough room in the output buffer - StatusInvalidArg, // invalid input parameter - StatusInvalidData // data was corrupted or incomplete + StatusUnsupported, // zlib is unavailable + StatusOutOfMemory, // there was not enough memory + StatusBufferTooShort, // there was not enough room in the output buffer + StatusInvalidArg, // invalid input parameter + StatusInvalidData // data was corrupted or incomplete }; bool isAvailable(); diff --git a/include/llvm/Support/Memory.h b/include/llvm/Support/Memory.h index a08c79649d5..8251fcd96c4 100644 --- a/include/llvm/Support/Memory.h +++ b/include/llvm/Support/Memory.h @@ -95,7 +95,7 @@ namespace sys { /// memory was not allocated using the allocateMappedMemory method. /// \p Block describes the memory block to be protected. /// \p Flags specifies the new protection state to be assigned to the block. - /// \p ErrMsg [out] returns a string describing any error that occured. + /// \p ErrMsg [out] returns a string describing any error that occurred. /// /// If \p Flags is MF_WRITE, the actual behavior varies /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the diff --git a/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/include/llvm/Transforms/Utils/ASanStackFrameLayout.h index f5437208f91..4e4f02c84ec 100644 --- a/include/llvm/Transforms/Utils/ASanStackFrameLayout.h +++ b/include/llvm/Transforms/Utils/ASanStackFrameLayout.h @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This header defines ComputeASanStackFrameLayout and auxilary data structs. +// This header defines ComputeASanStackFrameLayout and auxiliary data structs. // //===----------------------------------------------------------------------===// #ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index 6adeedb2448..5f7dd98e193 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -154,7 +154,7 @@ static bool isObjectSize(const Value *V, uint64_t Size, /// isIdentifiedFunctionLocal - Return true if V is umabigously identified /// at the function-level. Different IdentifiedFunctionLocals can't alias. /// Further, an IdentifiedFunctionLocal can not alias with any function -/// arguments other than itself, which is not neccessarily true for +/// arguments other than itself, which is not necessarily true for /// IdentifiedObjects. 
static bool isIdentifiedFunctionLocal(const Value *V) { diff --git a/lib/Analysis/DependenceAnalysis.cpp b/lib/Analysis/DependenceAnalysis.cpp index f152aeb9de7..b74140db2cf 100644 --- a/lib/Analysis/DependenceAnalysis.cpp +++ b/lib/Analysis/DependenceAnalysis.cpp @@ -3178,7 +3178,7 @@ void DependenceAnalysis::updateDirection(Dependence::DVEntry &Level, /// Check if we can delinearize the subscripts. If the SCEVs representing the /// source and destination array references are recurrences on a nested loop, -/// this function flattens the nested recurrences into seperate recurrences +/// this function flattens the nested recurrences into separate recurrences /// for each loop level. bool DependenceAnalysis::tryDelinearize(const SCEV *SrcSCEV, const SCEV *DstSCEV, diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 064aafd01e4..b65d99e4d67 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -7143,7 +7143,7 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE, const SCEV *Start = this->getStart(); const SCEV *Step = this->getStepRecurrence(SE); - // Build the SCEV representation of the cannonical induction variable in the + // Build the SCEV representation of the canonical induction variable in the // loop of this SCEV. const SCEV *Zero = SE.getConstant(this->getType(), 0); const SCEV *One = SE.getConstant(this->getType(), 1); @@ -7189,13 +7189,13 @@ SCEVAddRecExpr::delinearize(ScalarEvolution &SE, else Rem = Quotient; - // Scale up the cannonical induction variable IV by whatever remains from the + // Scale up the canonical induction variable IV by whatever remains from the // Step after division by the GCD: the GCD is the size of all the sub-array. if (Step != GCD) { Step = SCEVDivision::divide(SE, Step, GCD); IV = SE.getMulExpr(IV, Step); } - // The access function in the current subscript is computed as the cannonical + // The access function in the current subscript is computed as the canonical // induction variable IV (potentially scaled up by the step) and offset by // Rem, the offset of delinearization in the sub-array. const SCEV *Index = SE.getAddExpr(IV, Rem); @@ -7652,7 +7652,7 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) { typedef DenseMap VerifyMap; -/// replaceSubString - Replaces all occurences of From in Str with To. +/// replaceSubString - Replaces all occurrences of From in Str with To. static void replaceSubString(std::string &Str, StringRef From, StringRef To) { size_t Pos = 0; while ((Pos = Str.find(From, Pos)) != std::string::npos) { diff --git a/lib/CodeGen/CriticalAntiDepBreaker.cpp b/lib/CodeGen/CriticalAntiDepBreaker.cpp index 18c8e0ae125..463eb86769e 100644 --- a/lib/CodeGen/CriticalAntiDepBreaker.cpp +++ b/lib/CodeGen/CriticalAntiDepBreaker.cpp @@ -595,7 +595,7 @@ BreakAntiDependencies(const std::vector& SUnits, if (RC == reinterpret_cast(-1)) AntiDepReg = 0; - // Look for a suitable register to use to break the anti-depenence. + // Look for a suitable register to use to break the anti-dependence. // // TODO: Instead of picking the first free register, consider which might // be the best. diff --git a/lib/CodeGen/MachineScheduler.cpp b/lib/CodeGen/MachineScheduler.cpp index 2de3d20e597..beb724342ee 100644 --- a/lib/CodeGen/MachineScheduler.cpp +++ b/lib/CodeGen/MachineScheduler.cpp @@ -1976,7 +1976,7 @@ void SchedBoundary::bumpNode(SUnit *SU) { } else { // After updating ZoneCritResIdx and ExpectedLatency, check if we're - // resource limited. 
If a stall occured, bumpCycle does this. + // resource limited. If a stall occurred, bumpCycle does this. unsigned LFactor = SchedModel->getLatencyFactor(); IsResourceLimited = (int)(getCriticalCount() - (getScheduledLatency() * LFactor)) diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp index 75e37907357..bd4c0e209de 100644 --- a/lib/CodeGen/ScheduleDAG.cpp +++ b/lib/CodeGen/ScheduleDAG.cpp @@ -63,7 +63,7 @@ const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const { /// not already. It also adds the current node as a successor of the /// specified node. bool SUnit::addPred(const SDep &D, bool Required) { - // If this node already has this depenence, don't add a redundant one. + // If this node already has this dependence, don't add a redundant one. for (SmallVectorImpl::iterator I = Preds.begin(), E = Preds.end(); I != E; ++I) { // Zero-latency weak edges may be added purely for heuristic ordering. Don't diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index dd67b45e728..2dafcb9fa97 100644 --- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -6740,7 +6740,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0); } - // The next optimizations are desireable only if SELECT_CC can be lowered. + // The next optimizations are desirable only if SELECT_CC can be lowered. // Check against MVT::Other for SELECT_CC, which is a workaround for targets // having to say they don't support SELECT_CC on every type the DAG knows // about, since there is no way to mark an opcode illegal at all value types @@ -6797,7 +6797,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) { return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0); } - // The next optimizations are desireable only if SELECT_CC can be lowered. + // The next optimizations are desirable only if SELECT_CC can be lowered. // Check against MVT::Other for SELECT_CC, which is a workaround for targets // having to say they don't support SELECT_CC on every type the DAG knows // about, since there is no way to mark an opcode illegal at all value types @@ -8265,7 +8265,7 @@ bool DAGCombiner::SliceUpLoad(SDNode *N) { // The width of the type must be a power of 2 and greater than 8-bits. // Otherwise the load cannot be represented in LLVM IR. // Moreover, if we shifted with a non-8-bits multiple, the slice - // will be accross several bytes. We do not support that. + // will be across several bytes. We do not support that. 
unsigned Width = User->getValueSizeInBits(0); if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7)) return 0; diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp index d70e6b30448..79c1e7dce55 100644 --- a/lib/CodeGen/StackMaps.cpp +++ b/lib/CodeGen/StackMaps.cpp @@ -41,7 +41,7 @@ PatchPointOpers::PatchPointOpers(const MachineInstr *MI) ++CheckStartIdx; assert(getMetaIdx() == CheckStartIdx && - "Unexpected additonal definition in Patchpoint intrinsic."); + "Unexpected additional definition in Patchpoint intrinsic."); #endif } diff --git a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h index f08e2870dce..8d16ee85d14 100644 --- a/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h +++ b/lib/ExecutionEngine/IntelJITEvents/jitprofiling.h @@ -164,10 +164,10 @@ typedef struct _iJIT_Method_NIDS typedef struct _LineNumberInfo { - /* x86 Offset from the begining of the method*/ - unsigned int Offset; - - /* source line number from the begining of the source file */ + /* x86 Offset from the beginning of the method*/ + unsigned int Offset; + + /* source line number from the beginning of the source file */ unsigned int LineNumber; } *pLineNumberInfo, LineNumberInfo; @@ -191,9 +191,9 @@ typedef struct _iJIT_Method_Load unsigned int method_size; /* Line Table size in number of entries - Zero if none */ - unsigned int line_number_size; - - /* Pointer to the begining of the line numbers info array */ + unsigned int line_number_size; + + /* Pointer to the beginning of the line numbers info array */ pLineNumberInfo line_number_table; /* unique class ID */ diff --git a/lib/ExecutionEngine/Interpreter/Execution.cpp b/lib/ExecutionEngine/Interpreter/Execution.cpp index 5de065903d5..8a101dfa27c 100644 --- a/lib/ExecutionEngine/Interpreter/Execution.cpp +++ b/lib/ExecutionEngine/Interpreter/Execution.cpp @@ -1120,7 +1120,7 @@ void Interpreter::visitCallSite(CallSite CS) { callFunction((Function*)GVTOP(SRC), ArgVals); } -// auxilary function for shift operations +// auxiliary function for shift operations static unsigned getShiftAmount(uint64_t orgShiftAmount, llvm::APInt valueToShift) { unsigned valueWidth = valueToShift.getBitWidth(); diff --git a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp index cf90e77e389..f1dd5a6a519 100644 --- a/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp +++ b/lib/ExecutionEngine/MCJIT/SectionMemoryManager.cpp @@ -78,7 +78,7 @@ uint8_t *SectionMemoryManager::allocateSection(MemoryGroup &MemGroup, sys::Memory::MF_WRITE, ec); if (ec) { - // FIXME: Add error propogation to the interface. + // FIXME: Add error propagation to the interface. return NULL; } diff --git a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h index 181964faa99..2ed2957d96e 100644 --- a/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h +++ b/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h @@ -116,7 +116,7 @@ class RuntimeDyldELF : public RuntimeDyldImpl { virtual void updateGOTEntries(StringRef Name, uint64_t Addr); - // Relocation entries for symbols whose position-independant offset is + // Relocation entries for symbols whose position-independent offset is // updated in a global offset table. typedef SmallVector GOTRelocations; GOTRelocations GOTEntries; // List of entries requiring finalization. 
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp index 035ecaacc8c..dba543b6833 100644 --- a/lib/MC/MCParser/AsmParser.cpp +++ b/lib/MC/MCParser/AsmParser.cpp @@ -1341,7 +1341,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info) { if (!getTargetParser().ParseDirective(ID)) return false; - // Next, check the extention directive map to see if any extension has + // Next, check the extension directive map to see if any extension has // registered itself to parse this directive. std::pair Handler = ExtensionDirectiveMap.lookup(IDVal); @@ -3164,13 +3164,13 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) { /// /// With the support added for named parameters there may be code out there that /// is transitioning from positional parameters. In versions of gas that did -/// not support named parameters they would be ignored on the macro defintion. +/// not support named parameters they would be ignored on the macro definition. /// But to support both styles of parameters this is not possible so if a macro -/// defintion has named parameters but does not use them and has what appears +/// definition has named parameters but does not use them and has what appears /// to be positional parameters, strings like $1, $2, ... and $n, then issue a /// warning that the positional parameter found in body which have no effect. /// Hoping the developer will either remove the named parameters from the macro -/// definiton so the positional parameters get used if that was what was +/// definition so the positional parameters get used if that was what was /// intended or change the macro to use the named parameters. It is possible /// this warning will trigger when the none of the named parameters are used /// and the strings like $1 are infact to simply to be passed trough unchanged. diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index 802233c1099..deb9b05206b 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3776,7 +3776,7 @@ APFloat::opStatus APFloat::next(bool nextDown) { // change the payload. if (isSignaling()) { result = opInvalidOp; - // For consistency, propogate the sign of the sNaN to the qNaN. + // For consistency, propagate the sign of the sNaN to the qNaN. makeNaN(false, isNegative(), 0); } break; diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp index 89f96bd5774..0c4672582bf 100644 --- a/lib/Support/APInt.cpp +++ b/lib/Support/APInt.cpp @@ -1096,7 +1096,7 @@ APInt APInt::ashr(unsigned shiftAmt) const { // to include in this word. val[breakWord] = pVal[breakWord+offset] >> wordShift; - // Deal with sign extenstion in the break word, and possibly the word before + // Deal with sign extension in the break word, and possibly the word before // it. if (isNegative()) { if (wordShift > bitsInWord) { diff --git a/lib/Support/CommandLine.cpp b/lib/Support/CommandLine.cpp index 7ed4dead041..1b4d2c72875 100644 --- a/lib/Support/CommandLine.cpp +++ b/lib/Support/CommandLine.cpp @@ -246,12 +246,11 @@ static Option *LookupNearestOption(StringRef Arg, return Best; } -/// CommaSeparateAndAddOccurence - A wrapper around Handler->addOccurence() that -/// does special handling of cl::CommaSeparated options. -static bool CommaSeparateAndAddOccurence(Option *Handler, unsigned pos, - StringRef ArgName, - StringRef Value, bool MultiArg = false) -{ +/// CommaSeparateAndAddOccurrence - A wrapper around Handler->addOccurrence() +/// that does special handling of cl::CommaSeparated options. 
+static bool CommaSeparateAndAddOccurrence(Option *Handler, unsigned pos, + StringRef ArgName, StringRef Value, + bool MultiArg = false) { // Check to see if this option accepts a comma separated list of values. If // it does, we have to split up the value into multiple values. if (Handler->getMiscFlags() & CommaSeparated) { @@ -312,13 +311,13 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName, // If this isn't a multi-arg option, just run the handler. if (NumAdditionalVals == 0) - return CommaSeparateAndAddOccurence(Handler, i, ArgName, Value); + return CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value); // If it is, run the handle several times. bool MultiArg = false; if (Value.data()) { - if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg)) + if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg)) return true; --NumAdditionalVals; MultiArg = true; @@ -329,7 +328,7 @@ static inline bool ProvideOption(Option *Handler, StringRef ArgName, return Handler->error("not enough values!"); Value = argv[++i]; - if (CommaSeparateAndAddOccurence(Handler, i, ArgName, Value, MultiArg)) + if (CommaSeparateAndAddOccurrence(Handler, i, ArgName, Value, MultiArg)) return true; MultiArg = true; --NumAdditionalVals; @@ -1502,7 +1501,7 @@ protected: std::vector SortedCategories; std::map > CategorizedOptions; - // Collect registered option categories into vector in preperation for + // Collect registered option categories into vector in preparation for // sorting. for (OptionCatSet::const_iterator I = RegisteredOptionCategories->begin(), E = RegisteredOptionCategories->end(); diff --git a/lib/Support/Path.cpp b/lib/Support/Path.cpp index b97b7a4baf2..c2b3f1863ea 100644 --- a/lib/Support/Path.cpp +++ b/lib/Support/Path.cpp @@ -32,10 +32,10 @@ namespace { #ifdef LLVM_ON_WIN32 const char *separators = "\\/"; - const char prefered_separator = '\\'; + const char preferred_separator = '\\'; #else const char separators = '/'; - const char prefered_separator = '/'; + const char preferred_separator = '/'; #endif StringRef find_first_component(StringRef path) { @@ -403,7 +403,7 @@ void append(SmallVectorImpl &path, const Twine &a, if (!component_has_sep && !(path.empty() || is_root_name)) { // Add a separator. - path.push_back(prefered_separator); + path.push_back(preferred_separator); } path.append(i->begin(), i->end()); diff --git a/lib/Support/regcomp.c b/lib/Support/regcomp.c index 74d9186aaaa..0b5b765f89e 100644 --- a/lib/Support/regcomp.c +++ b/lib/Support/regcomp.c @@ -532,10 +532,10 @@ p_simp_re(struct parse *p, sopno subno; # define BACKSL (1<; defm TBL3 : NI_TBL_pat<0b10, 0b0, "tbl", "VTriple">; defm TBL4 : NI_TBL_pat<0b11, 0b0, "tbl", "VQuad">; -// Table lookup extention +// Table lookup extension class NI_TBX op2, bits<2> len, bit op, string asmop, string OpS, RegisterOperand OpVPR, RegisterOperand VecList> diff --git a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp index ff5b23013df..a88cbb2971f 100644 --- a/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp +++ b/lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp @@ -1517,7 +1517,7 @@ static DecodeStatus DecodeVLDSTLanePostInstruction(MCInst &Inst, unsigned Insn, unsigned Q = fieldFromInstruction(Insn, 30, 1); unsigned S = fieldFromInstruction(Insn, 10, 3); unsigned lane = 0; - // Calculate the number of lanes by number of vectors and transfered bytes. 
+ // Calculate the number of lanes by number of vectors and transferred bytes. // NumLanes = 16 bytes / bytes of each lane unsigned NumLanes = 16 / (TransferBytes / NumVecs); switch (NumLanes) { diff --git a/lib/Target/ARM/ARMISelDAGToDAG.cpp b/lib/Target/ARM/ARMISelDAGToDAG.cpp index d561db2f07e..9e827cf1035 100644 --- a/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -1407,7 +1407,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue N, bool ARMDAGToDAGISel::SelectT2AddrModeExclusive(SDValue N, SDValue &Base, SDValue &OffImm) { - // This *must* succeed since it's used for the irreplacable ldrex and strex + // This *must* succeed since it's used for the irreplaceable ldrex and strex // instructions. Base = N; OffImm = CurDAG->getTargetConstant(0, MVT::i32); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 1d2236f2c9f..b851a8ffd89 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -5987,7 +5987,7 @@ static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { if (cast(Op)->getOrdering() <= Monotonic) return Op; - // Aquire/Release load/store is not legal for targets without a + // Acquire/Release load/store is not legal for targets without a // dmb or equivalent available. return SDValue(); } @@ -10189,7 +10189,7 @@ bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const case MVT::v2f64: { // For any little-endian targets with neon, we can support unaligned ld/st // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8. - // A big-endian target may also explictly support unaligned accesses + // A big-endian target may also explicitly support unaligned accesses if (Subtarget->hasNEON() && (AllowsUnaligned || isLittleEndian())) { if (Fast) *Fast = true; diff --git a/lib/Target/ARM/ARMRegisterInfo.td b/lib/Target/ARM/ARMRegisterInfo.td index d0457618ef6..7f0fe05738c 100644 --- a/lib/Target/ARM/ARMRegisterInfo.td +++ b/lib/Target/ARM/ARMRegisterInfo.td @@ -214,7 +214,7 @@ def GPRnopc : RegisterClass<"ARM", [i32], 32, (sub GPR, PC)> { } // GPRs without the PC but with APSR. Some instructions allow accessing the -// APSR, while actually encoding PC in the register field. This is usefull +// APSR, while actually encoding PC in the register field. This is useful // for assembly and disassembly only. def GPRwithAPSR : RegisterClass<"ARM", [i32], 32, (add (sub GPR, PC), APSR_NZCV)> { let AltOrders = [(add LR, GPRnopc), (trunc GPRnopc, 8)]; diff --git a/lib/Target/ARM/ARMScheduleSwift.td b/lib/Target/ARM/ARMScheduleSwift.td index 8d7dbc24609..b03d5ff44c6 100644 --- a/lib/Target/ARM/ARMScheduleSwift.td +++ b/lib/Target/ARM/ARMScheduleSwift.td @@ -1721,7 +1721,7 @@ let SchedModel = SwiftModel in { SchedVar, - // Load of a Q register (not neccessarily true). We should not be mapping to + // Load of a Q register (not necessarily true). We should not be mapping to // 4 S registers, either. SchedVar, @@ -1858,7 +1858,7 @@ let SchedModel = SwiftModel in { // Assume 5 D registers. SchedVar, SchedVar, - // Asume three Q registers. + // Assume three Q registers. SchedVar, SchedVar, // Assume 7 D registers. 
diff --git a/lib/Target/ARM/ARMTargetTransformInfo.cpp b/lib/Target/ARM/ARMTargetTransformInfo.cpp index 2f2da73162c..5d0b73a191e 100644 --- a/lib/Target/ARM/ARMTargetTransformInfo.cpp +++ b/lib/Target/ARM/ARMTargetTransformInfo.cpp @@ -533,7 +533,7 @@ unsigned ARMTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueK // creates a sequence of shift, and, or instructions to construct values. // These sequences are recognized by the ISel and have zero-cost. Not so for // the vectorized code. Because we have support for v2i64 but not i64 those - // sequences look particularily beneficial to vectorize. + // sequences look particularly beneficial to vectorize. // To work around this we increase the cost of v2i64 operations to make them // seem less beneficial. if (LT.second == MVT::v2i64 && diff --git a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h index bb781ecece0..42a1cbb8c22 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h +++ b/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h @@ -183,7 +183,8 @@ namespace ARM_ISB { inline static const char *InstSyncBOptToString(unsigned val) { switch (val) { - default: llvm_unreachable("Unkown memory operation"); + default: + llvm_unreachable("Unknown memory operation"); case RESERVED_0: return "#0x0"; case RESERVED_1: return "#0x1"; case RESERVED_2: return "#0x2"; diff --git a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp index 8e224780d83..abacc1e3126 100644 --- a/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp +++ b/lib/Target/ARM/MCTargetDesc/ARMELFStreamer.cpp @@ -1035,7 +1035,7 @@ void ARMELFStreamer::emitFnStart() { } void ARMELFStreamer::emitFnEnd() { - assert(FnStart && ".fnstart must preceeds .fnend"); + assert(FnStart && ".fnstart must precede .fnend"); // Emit unwind opcodes if there is no .handlerdata directive if (!ExTab && !CantUnwind) diff --git a/lib/Target/Hexagon/HexagonCopyToCombine.cpp b/lib/Target/Hexagon/HexagonCopyToCombine.cpp index 97e7b85f55b..3ab796d01c4 100644 --- a/lib/Target/Hexagon/HexagonCopyToCombine.cpp +++ b/lib/Target/Hexagon/HexagonCopyToCombine.cpp @@ -285,7 +285,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1, // Update the intermediate instruction to with the kill flag. if (KillingInstr) { bool Added = KillingInstr->addRegisterKilled(KilledOperand, TRI, true); - (void)Added; // supress compiler warning + (void)Added; // suppress compiler warning assert(Added && "Must successfully update kill flag"); removeKillInfo(I2, KilledOperand); } @@ -343,7 +343,7 @@ bool HexagonCopyToCombine::isSafeToMoveTogether(MachineInstr *I1, // Update I1 to set the kill flag. This flag will later be picked up by // the new COMBINE instruction.
bool Added = I1->addRegisterKilled(KilledOperand, TRI); - (void)Added; // supress compiler warning + (void)Added; // suppress compiler warning assert(Added && "Must successfully update kill flag"); } DoInsertAtI1 = false; diff --git a/lib/Target/Hexagon/HexagonHardwareLoops.cpp b/lib/Target/Hexagon/HexagonHardwareLoops.cpp index 137c4bf0543..fccbcb3d703 100644 --- a/lib/Target/Hexagon/HexagonHardwareLoops.cpp +++ b/lib/Target/Hexagon/HexagonHardwareLoops.cpp @@ -1522,7 +1522,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( if (PB != Latch) { Tmp2.clear(); bool NotAnalyzed = TII->AnalyzeBranch(*PB, TB, FB, Tmp2, false); - (void)NotAnalyzed; // supress compiler warning + (void)NotAnalyzed; // suppress compiler warning assert (!NotAnalyzed && "Should be analyzable!"); if (TB != Header && (Tmp2.empty() || FB != Header)) TII->InsertBranch(*PB, NewPH, 0, EmptyCond, DL); @@ -1534,7 +1534,7 @@ MachineBasicBlock *HexagonHardwareLoops::createPreheaderForLoop( // Insert an unconditional branch to the header. TB = FB = 0; bool LatchNotAnalyzed = TII->AnalyzeBranch(*Latch, TB, FB, Tmp2, false); - (void)LatchNotAnalyzed; // supress compiler warning + (void)LatchNotAnalyzed; // suppress compiler warning assert (!LatchNotAnalyzed && "Should be analyzable!"); if (!TB && !FB) TII->InsertBranch(*Latch, Header, 0, EmptyCond, DL); diff --git a/lib/Target/Hexagon/HexagonInstrInfo.cpp b/lib/Target/Hexagon/HexagonInstrInfo.cpp index f9be3192f1f..fff51dda679 100644 --- a/lib/Target/Hexagon/HexagonInstrInfo.cpp +++ b/lib/Target/Hexagon/HexagonInstrInfo.cpp @@ -1793,7 +1793,7 @@ bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const { return true; if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) { - // Check addressing mode and retreive non-ext equivalent instruction. + // Check addressing mode and retrieve non-ext equivalent instruction. switch (getAddrMode(MI)) { case HexagonII::Absolute : @@ -1827,7 +1827,7 @@ short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const { return NonExtOpcode; if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) { - // Check addressing mode and retreive non-ext equivalent instruction. + // Check addressing mode and retrieve non-ext equivalent instruction. switch (getAddrMode(MI)) { case HexagonII::Absolute : return Hexagon::getBasedWithImmOffset(MI->getOpcode()); diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index 594ff4f4411..aae2dcd16e7 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -869,7 +869,7 @@ void MipsAsmParser::expandMemInst(MCInst &Inst, SMLoc IDLoc, TempInst.addOperand(MCOperand::CreateReg(BaseRegNum)); Instructions.push_back(TempInst); TempInst.clear(); - // And finaly, create original instruction with low part + // And finally, create original instruction with low part // of offset and new base. TempInst.setOpcode(Inst.getOpcode()); TempInst.addOperand(MCOperand::CreateReg(RegOpNum)); @@ -1247,7 +1247,7 @@ MipsAsmParser::ParseOperand(SmallVectorImpl &Operands, return false; } // Look for the existing symbol, we should check if - // we need to assigne the propper RegisterKind. + // we need to assign the proper RegisterKind. if (searchSymbolAlias(Operands, MipsOperand::Kind_None)) return false; // Else drop to expression parsing.
diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td index fbcd10fe2b8..a788d60a575 100644 --- a/lib/Target/Mips/MipsMSAInstrInfo.td +++ b/lib/Target/Mips/MipsMSAInstrInfo.td @@ -3519,7 +3519,7 @@ class MSABitconvertPat; -// These are endian-independant because the element size doesnt change +// These are endian-independent because the element size doesn't change def : MSABitconvertPat; def : MSABitconvertPat; def : MSABitconvertPat; diff --git a/lib/Target/NVPTX/NVPTXISelLowering.cpp b/lib/Target/NVPTX/NVPTXISelLowering.cpp index d8151761e05..8c27c9f409b 100644 --- a/lib/Target/NVPTX/NVPTXISelLowering.cpp +++ b/lib/Target/NVPTX/NVPTXISelLowering.cpp @@ -1258,7 +1258,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const { // Since StoreV2 is a target node, we cannot rely on DAG type legalization. // Therefore, we must ensure the type is legal. For i1 and i8, we set the - // stored type to i16 and propogate the "real" type as the memory type. + // stored type to i16 and propagate the "real" type as the memory type. bool NeedExt = false; if (EltVT.getSizeInBits() < 16) NeedExt = true; @@ -2074,7 +2074,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG, // Since LoadV2 is a target node, we cannot rely on DAG type legalization. // Therefore, we must ensure the type is legal. For i1 and i8, we set the - // loaded type to i16 and propogate the "real" type as the memory type. + // loaded type to i16 and propagate the "real" type as the memory type. bool NeedTrunc = false; if (EltVT.getSizeInBits() < 16) { EltVT = MVT::i16; @@ -2161,7 +2161,7 @@ static void ReplaceINTRINSIC_W_CHAIN(SDNode *N, SelectionDAG &DAG, // Since LDU/LDG are target nodes, we cannot rely on DAG type // legalization. // Therefore, we must ensure the type is legal. For i1 and i8, we set the - // loaded type to i16 and propogate the "real" type as the memory type. + // loaded type to i16 and propagate the "real" type as the memory type. bool NeedTrunc = false; if (EltVT.getSizeInBits() < 16) { EltVT = MVT::i16; diff --git a/lib/Target/NVPTX/NVVMReflect.cpp b/lib/Target/NVPTX/NVVMReflect.cpp index bc67cb14ff7..5da8c2ed092 100644 --- a/lib/Target/NVPTX/NVVMReflect.cpp +++ b/lib/Target/NVPTX/NVVMReflect.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This pass replaces occurences of __nvvm_reflect("string") with an +// This pass replaces occurrences of __nvvm_reflect("string") with an // integer based on -nvvm-reflect-list string= option given to this pass. // If an undefined string value is seen in a call to __nvvm_reflect("string"), // a default value of 0 will be used. @@ -84,7 +84,7 @@ NVVMReflectEnabled("nvvm-reflect-enable", cl::init(true), cl::Hidden, char NVVMReflect::ID = 0; INITIALIZE_PASS(NVVMReflect, "nvvm-reflect", - "Replace occurences of __nvvm_reflect() calls with 0/1", false, + "Replace occurrences of __nvvm_reflect() calls with 0/1", false, false) static cl::list diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index c5d96127716..1c8f928887c 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -7205,7 +7205,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N, // you might suspect (sizeof(vector) bytes after the last requested // load), but rather sizeof(vector) - 1 bytes after the last // requested vector.
The point of this is to avoid a page fault if the - // base address happend to be aligned. This works because if the base + // base address happened to be aligned. This works because if the base // address is aligned, then adding less than a full vector length will // cause the last vector in the sequence to be (re)loaded. Otherwise, // the next vector will be fetched as you might suspect was necessary. diff --git a/lib/Target/R600/AMDGPU.h b/lib/Target/R600/AMDGPU.h index 8eb1b695d76..3e1848b5f8e 100644 --- a/lib/Target/R600/AMDGPU.h +++ b/lib/Target/R600/AMDGPU.h @@ -68,7 +68,7 @@ namespace ShaderType { /// various memory regions on the hardware. On the CPU /// all of the address spaces point to the same memory, /// however on the GPU, each address space points to -/// a seperate piece of memory that is unique from other +/// a separate piece of memory that is unique from other /// memory locations. namespace AMDGPUAS { enum AddressSpaces { diff --git a/lib/Target/R600/AMDILCFGStructurizer.cpp b/lib/Target/R600/AMDILCFGStructurizer.cpp index 4ad7eba36e2..69ced3c8f6c 100644 --- a/lib/Target/R600/AMDILCFGStructurizer.cpp +++ b/lib/Target/R600/AMDILCFGStructurizer.cpp @@ -224,7 +224,7 @@ protected: /// Compute the reversed DFS post order of Blocks void orderBlocks(MachineFunction *MF); - // Function originaly from CFGStructTraits + // Function originally from CFGStructTraits void insertInstrEnd(MachineBasicBlock *MBB, int NewOpcode, DebugLoc DL = DebugLoc()); MachineInstr *insertInstrBefore(MachineBasicBlock *MBB, int NewOpcode, diff --git a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp index 5af83209a0d..fc4ed35c189 100644 --- a/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp +++ b/lib/Target/R600/MCTargetDesc/SIMCCodeEmitter.cpp @@ -53,7 +53,7 @@ public: ~SIMCCodeEmitter() { } - /// \breif Encode the instruction and write it to the OS. + /// \brief Encode the instruction and write it to the OS. virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS, SmallVectorImpl &Fixups) const; diff --git a/lib/Target/R600/R600ClauseMergePass.cpp b/lib/Target/R600/R600ClauseMergePass.cpp index 33d2ca32577..3d9015c9dfe 100644 --- a/lib/Target/R600/R600ClauseMergePass.cpp +++ b/lib/Target/R600/R600ClauseMergePass.cpp @@ -50,7 +50,7 @@ private: /// IfCvt pass can generate "disabled" ALU clause marker that need to be /// removed and their content affected to the previous alu clause. - /// This function parse instructions after CFAlu untill it find a disabled + /// This function parses instructions after CFAlu until it finds a disabled /// CFAlu and merge the content, or an enabled CFAlu.
void cleanPotentialDisabledCFAlu(MachineInstr *CFAlu) const; diff --git a/lib/Target/R600/R600Defines.h b/lib/Target/R600/R600Defines.h index 1781f2aee16..f2f28fe469b 100644 --- a/lib/Target/R600/R600Defines.h +++ b/lib/Target/R600/R600Defines.h @@ -52,7 +52,7 @@ namespace R600_InstFlag { #define HAS_NATIVE_OPERANDS(Flags) ((Flags) & R600_InstFlag::NATIVE_OPERANDS) -/// \brief Defines for extracting register infomation from register encoding +/// \brief Defines for extracting register information from register encoding #define HW_REG_MASK 0x1ff #define HW_CHAN_SHIFT 9 diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp index 03feabe23e6..b9b242a6e89 100644 --- a/lib/Target/R600/R600ISelLowering.cpp +++ b/lib/Target/R600/R600ISelLowering.cpp @@ -990,7 +990,7 @@ SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const DAG.getCondCode(ISD::SETNE)); } -/// LLVM generates byte-addresed pointers. For indirect addressing, we need to +/// LLVM generates byte-addressed pointers. For indirect addressing, we need to /// convert these pointers to a register index. Each register holds /// 16 bytes, (4 x 32bit sub-register), but we need to take into account the /// \p StackWidth, which tells us how many of the 4 sub-registrers will be used @@ -1389,8 +1389,8 @@ SDValue R600TargetLowering::LowerFormalArguments( DAG.getConstant(36 + VA.getLocMemOffset(), MVT::i32), MachinePointerInfo(UndefValue::get(PtrTy)), MemVT, false, false, 4); - // 4 is the prefered alignment for - // the CONSTANT memory space. + // 4 is the preferred alignment for + // the CONSTANT memory space. InVals.push_back(Arg); } return Chain; diff --git a/lib/Target/R600/R600ISelLowering.h b/lib/Target/R600/R600ISelLowering.h index c10257eeada..3cca93306b5 100644 --- a/lib/Target/R600/R600ISelLowering.h +++ b/lib/Target/R600/R600ISelLowering.h @@ -43,7 +43,7 @@ private: unsigned Gen; /// Each OpenCL kernel has nine implicit parameters that are stored in the /// first nine dwords of a Vertex Buffer. These implicit parameters are - /// lowered to load instructions which retreive the values from the Vertex + /// lowered to load instructions which retrieve the values from the Vertex /// Buffer. SDValue LowerImplicitParameter(SelectionDAG &DAG, EVT VT, SDLoc DL, unsigned DwordOffset) const; diff --git a/lib/Target/R600/R600InstrInfo.h b/lib/Target/R600/R600InstrInfo.h index 13d981094ed..d5ff4de7646 100644 --- a/lib/Target/R600/R600InstrInfo.h +++ b/lib/Target/R600/R600InstrInfo.h @@ -138,7 +138,7 @@ namespace llvm { /// Same but using const index set instead of MI set. bool fitsConstReadLimitations(const std::vector&) const; - /// \breif Vector instructions are instructions that must fill all + /// \brief Vector instructions are instructions that must fill all /// instruction slots within an instruction group. 
bool isVector(const MachineInstr &MI) const; diff --git a/lib/Target/R600/R600Instructions.td b/lib/Target/R600/R600Instructions.td index 4441fa6495e..f7b7488d69c 100644 --- a/lib/Target/R600/R600Instructions.td +++ b/lib/Target/R600/R600Instructions.td @@ -2263,7 +2263,7 @@ let Inst{63-32} = Word1; //===--------------------------------------------------------------------===// //===---------------------------------------------------------------------===// // Custom Inserter for Branches and returns, this eventually will be a -// seperate pass +// separate pass //===---------------------------------------------------------------------===// let isTerminator = 1, usesCustomInserter = 1, isBranch = 1, isBarrier = 1 in { def BRANCH : ILFormat<(outs), (ins brtarget:$target), diff --git a/lib/Target/R600/R600Packetizer.cpp b/lib/Target/R600/R600Packetizer.cpp index cd9b6eae6ed..9dd4978fb5b 100644 --- a/lib/Target/R600/R600Packetizer.cpp +++ b/lib/Target/R600/R600Packetizer.cpp @@ -66,7 +66,7 @@ private: } /// \returns register to PV chan mapping for bundle/single instructions that - /// immediatly precedes I. + /// immediately precede I. DenseMap getPreviousVector(MachineBasicBlock::iterator I) const { DenseMap Result; diff --git a/lib/Target/R600/SIISelLowering.cpp b/lib/Target/R600/SIISelLowering.cpp index a66f289e9ab..36dd3cf7f0b 100644 --- a/lib/Target/R600/SIISelLowering.cpp +++ b/lib/Target/R600/SIISelLowering.cpp @@ -1083,7 +1083,7 @@ void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand, else return; - // Nothing todo if they fit naturaly + // Nothing to do if they fit naturally if (fitsRegClass(DAG, Operand, RegClass)) return; diff --git a/lib/Target/R600/SIRegisterInfo.cpp b/lib/Target/R600/SIRegisterInfo.cpp index ed0bbaffae6..a784fa42647 100644 --- a/lib/Target/R600/SIRegisterInfo.cpp +++ b/lib/Target/R600/SIRegisterInfo.cpp @@ -122,7 +122,7 @@ const TargetRegisterClass *SIRegisterInfo::getSubRegClass( return RC; // If this register has a sub-register, we can safely assume it is a 32-bit - // register, becuase all of SI's sub-registers are 32-bit. + // register, because all of SI's sub-registers are 32-bit. if (isSGPRClass(RC)) { return &AMDGPU::SGPR_32RegClass; } else { diff --git a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h index f3caeaa0c23..2e2d4bac797 100644 --- a/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h +++ b/lib/Target/Sparc/MCTargetDesc/SparcBaseInfo.h @@ -35,7 +35,7 @@ enum TOF { // Assembler: %hi(addr) or %lm(addr) MO_HI, - // Extract bits 43-22 of an adress. Only for sethi. + // Extract bits 43-22 of an address. Only for sethi. // Assembler: %h44(addr) MO_H44, diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 0ca145e3a61..19f57ab63ea 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1076,7 +1076,7 @@ static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) { if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3))) return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1); - // The remaing cases are 1, 2, 0/1/3 and 0/2/3. All these are + // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these // can be done by inverting the low CC bit and applying one of the // sign-based extractions above.
if (CCMask == (CCValid & SystemZ::CCMASK_1)) diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp index 90941d3616e..55192f9d4e4 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.cpp +++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp @@ -53,7 +53,7 @@ void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI, MachineFunction &MF = *MBB->getParent(); // Get two load or store instructions. Use the original instruction for one - // of them (arbitarily the second here) and create a clone for the other. + // of them (arbitrarily the second here) and create a clone for the other. MachineInstr *EarlierMI = MF.CloneMachineInstr(MI); MBB->insert(MI, EarlierMI); diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td index e1af0932c23..033f0d8ee66 100644 --- a/lib/Target/SystemZ/SystemZInstrInfo.td +++ b/lib/Target/SystemZ/SystemZInstrInfo.td @@ -759,7 +759,7 @@ let Defs = [CC], Uses = [CC] in { // Subtraction //===----------------------------------------------------------------------===// -// Plain substraction. Although immediate forms exist, we use the +// Plain subtraction. Although immediate forms exist, we use the // add-immediate instruction instead. let Defs = [CC], CCValues = 0xF, CompareZeroCCMask = 0x8 in { // Subtraction of a register. diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index c4c86ada3fa..ac3b39df547 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -563,7 +563,7 @@ struct InternalInstruction { uint8_t prefixPresent[0x100]; /* contains the location (for use with the reader) of the prefix byte */ uint64_t prefixLocations[0x100]; - /* The value of the vector extention prefix(EVEX/VEX/XOP), if present */ + /* The value of the vector extension prefix(EVEX/VEX/XOP), if present */ uint8_t vectorExtensionPrefix[4]; /* The type of the vector extension prefix */ VectorExtensionType vectorExtensionType; diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index 9fdc58a3116..d653c871b29 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -1512,7 +1512,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) { // garbage. Indeed, only the less significant bit is supposed to be accurate. // If we read more than the lsb, we may see non-zero values whereas lsb // is zero. Therefore, we have to truncate Op0Reg to i1 for the select. - // This is acheived by performing TEST against 1. + // This is achieved by performing TEST against 1. BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri)) .addReg(Op0Reg).addImm(1); unsigned ResultReg = createResultReg(RC); diff --git a/lib/Target/X86/X86Schedule.td b/lib/Target/X86/X86Schedule.td index 0556437b839..ac28d1e5436 100644 --- a/lib/Target/X86/X86Schedule.td +++ b/lib/Target/X86/X86Schedule.td @@ -577,7 +577,7 @@ def IIC_NOP : InstrItinClass; //===----------------------------------------------------------------------===// // Processor instruction itineraries. -// IssueWidth is analagous to the number of decode units. Core and its +// IssueWidth is analogous to the number of decode units. Core and its // descendents, including Nehalem and SandyBridge have 4 decoders. // Resources beyond the decoder operate on micro-ops and are bufferred // so adjacent micro-ops don't directly compete. 
diff --git a/lib/Target/XCore/XCoreLowerThreadLocal.cpp b/lib/Target/XCore/XCoreLowerThreadLocal.cpp index 784bd66d2e0..3a93d2ac2e7 100644 --- a/lib/Target/XCore/XCoreLowerThreadLocal.cpp +++ b/lib/Target/XCore/XCoreLowerThreadLocal.cpp @@ -154,8 +154,8 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) { return false; } } - } while (CE->hasNUsesOrMore(1)); // We need to check becasue a recursive - // sibbling may have used 'CE' when createReplacementInstr was called. + } while (CE->hasNUsesOrMore(1)); // We need to check because a recursive + // sibling may have used 'CE' when createReplacementInstr was called. CE->destroyConstant(); return true; } diff --git a/lib/Transforms/IPO/ConstantMerge.cpp b/lib/Transforms/IPO/ConstantMerge.cpp index d94c0f45323..aefcff95653 100644 --- a/lib/Transforms/IPO/ConstantMerge.cpp +++ b/lib/Transforms/IPO/ConstantMerge.cpp @@ -77,8 +77,8 @@ static void FindUsedValues(GlobalVariable *LLVMUsed, } // True if A is better than B. -static bool IsBetterCannonical(const GlobalVariable &A, - const GlobalVariable &B) { +static bool IsBetterCanonical(const GlobalVariable &A, + const GlobalVariable &B) { if (!A.hasLocalLinkage() && B.hasLocalLinkage()) return true; @@ -160,7 +160,7 @@ bool ConstantMerge::runOnModule(Module &M) { // If this is the first constant we find or if the old one is local, // replace with the current one. If the current is externally visible // it cannot be replace, but can be the canonical constant we merge with. - if (Slot == 0 || IsBetterCannonical(*GV, *Slot)) + if (Slot == 0 || IsBetterCanonical(*GV, *Slot)) Slot = GV; } diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp index 38614216c3c..33f0707f29a 100644 --- a/lib/Transforms/IPO/MergeFunctions.cpp +++ b/lib/Transforms/IPO/MergeFunctions.cpp @@ -723,7 +723,7 @@ void MergeFunctions::writeThunkOrAlias(Function *F, Function *G) { // Helper for writeThunk, // Selects proper bitcast operation, -// but a bit simplier then CastInst::getCastOpcode. +// but a bit simpler than CastInst::getCastOpcode. static Value* createCast(IRBuilder &Builder, Value *V, Type *DestTy) { Type *SrcTy = V->getType(); if (SrcTy->isIntegerTy() && DestTy->isPointerTy()) diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp index c949720b1f5..7d2fc0a5289 100644 --- a/lib/Transforms/InstCombine/InstCombineCalls.cpp +++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp @@ -77,7 +77,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) { // A single load+store correctly handles overlapping memory in the memmove // case. uint64_t Size = MemOpLength->getLimitedValue(); - assert(Size && "0-sized memory transfering should be removed already."); + assert(Size && "0-sized memory transferring should be removed already."); if (Size > 8 || (Size&(Size-1))) return 0; // If not 1/2/4/8 bytes, exit. @@ -684,7 +684,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) { return ReplaceInstUsesWith(CI, ConstantVector::get(NewElems)); } - // Couldn't simplify - cannonicalize constant to the RHS. + // Couldn't simplify - canonicalize constant to the RHS.
std::swap(Arg0, Arg1); } diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp index c85ec29f2fa..cccfd4d49ef 100644 --- a/lib/Transforms/InstCombine/InstCombineCasts.cpp +++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp @@ -1193,10 +1193,10 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) { // will not occur because the result of OpI is exact (as we will for // FMul, for example) is hopeless. However, we *can* nonetheless // frequently know that double rounding cannot occur (or that it is - // innoculous) by taking advantage of the specific structure of + // innocuous) by taking advantage of the specific structure of // infinitely-precise results that admit double rounding. // - // Specifically, if OpWidth >= 2*DstWdith+1 and DstWidth is sufficent + // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient // to represent both sources, we can guarantee that the double // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis, // "A Rigorous Framework for Fully Supporting the IEEE Standard ..." diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index 3bc8ad3c8c4..5dd3325a0bc 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -2048,7 +2048,7 @@ static APInt DemandedBitsLHSMask(ICmpInst &I, /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst /// should be swapped. -/// The descision is based on how many times these two operands are reused +/// The decision is based on how many times these two operands are reused /// as subtract operands and their positions in those instructions. /// The rational is that several architectures use the same instruction for /// both subtract and cmp, thus it is better if the order of those operands @@ -2064,7 +2064,7 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0, // Each time Op0 is the first operand, count -1: swapping is bad, the // subtract has already the same layout as the compare. // Each time Op0 is the second operand, count +1: swapping is good, the - // subtract has a diffrent layout as the compare. + // subtract has a different layout from the compare. // At the end, if the benefit is greater than 0, Op0 should come second to // expose more CSE opportunities. int GlobalSwapBenefits = 0; diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp index 178be61b43e..7ffb01b5b2a 100644 --- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp +++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp @@ -1013,7 +1013,7 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) { // references from RHSOp0 to LHSOp0, so we don't need to shift the mask. // If newRHS == newLHS, we want to remap any references from newRHS to // newLHS so that we can properly identify splats that may occur due to // obfuscation across the two vectors.
if (eltMask >= 0 && newRHS != NULL && newLHS != newRHS) eltMask += newLHSWidth; } diff --git a/lib/Transforms/InstCombine/InstructionCombining.cpp b/lib/Transforms/InstCombine/InstructionCombining.cpp index 6a7252fc41e..b453f81de94 100644 --- a/lib/Transforms/InstCombine/InstructionCombining.cpp +++ b/lib/Transforms/InstCombine/InstructionCombining.cpp @@ -1629,7 +1629,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { return &BI; } - // Cannonicalize fcmp_one -> fcmp_oeq + // Canonicalize fcmp_one -> fcmp_oeq FCmpInst::Predicate FPred; Value *Y; if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)), TrueDest, FalseDest)) && @@ -1645,7 +1645,7 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) { return &BI; } - // Cannonicalize icmp_ne -> icmp_eq + // Canonicalize icmp_ne -> icmp_eq ICmpInst::Predicate IPred; if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)), TrueDest, FalseDest)) && diff --git a/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/lib/Transforms/Instrumentation/AddressSanitizer.cpp index fe875192f58..377d0d87d16 100644 --- a/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -249,7 +249,7 @@ static ShadowMapping getShadowMapping(const Module &M, int LongSize) { ShadowMapping Mapping; // OR-ing shadow offset if more efficient (at least on x86), - // but on ppc64 we have to use add since the shadow offset is not neccesary + // but on ppc64 we have to use add since the shadow offset is not necessarily // 1/8-th of the address space. Mapping.OrShadowOffset = !IsPPC64 && !ClShort64BitOffset; diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp index 8a52a4444be..93e71cc1161 100644 --- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -1964,7 +1964,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // Now, get the shadow for the RetVal. if (!I.getType()->isSized()) return; IRBuilder<> IRBBefore(&I); - // Untill we have full dynamic coverage, make sure the retval shadow is 0. + // Until we have full dynamic coverage, make sure the retval shadow is 0. Value *Base = getShadowPtrForRetval(&I, IRBBefore); IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base, kShadowTLSAlignment); Instruction *NextInsn = 0; diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp index 5c188178200..45703113405 100644 --- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp +++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp @@ -487,7 +487,7 @@ bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) { } // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x -// standards. For background see C++11 standard. A slightly older, publically +// standards. For background see C++11 standard. A slightly older, publicly // available draft of the standard (not entirely up-to-date, but close enough // for casual browsing) is available here: // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf diff --git a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp index f8b6f15850f..8e2c362ad81 100644 --- a/lib/Transforms/ObjCARC/ObjCARCOpts.cpp +++ b/lib/Transforms/ObjCARC/ObjCARCOpts.cpp @@ -382,7 +382,7 @@ namespace { void clear(); /// Conservatively merge the two RRInfo. Returns true if a partial merge has - /// occured, false otherwise.
+ /// occurred, false otherwise. bool Merge(const RRInfo &Other); }; @@ -659,7 +659,7 @@ namespace { /// which pass through this block. This is only valid after both the /// top-down and bottom-up traversals are complete. /// - /// Returns true if overflow occured. Returns false if overflow did not + /// Returns true if overflow occurred. Returns false if overflow did not /// occur. bool GetAllPathCountWithOverflow(unsigned &PathCount) const { if (TopDownPathCount == OverflowOccurredValue || @@ -667,7 +667,7 @@ namespace { return true; unsigned long long Product = (unsigned long long)TopDownPathCount*BottomUpPathCount; - // Overflow occured if any of the upper bits of Product are set or if all + // Overflow occurred if any of the upper bits of Product are set or if all // the lower bits of Product are all set. return (Product >> 32) || ((PathCount = Product) == OverflowOccurredValue); @@ -711,7 +711,7 @@ void BBState::MergePred(const BBState &Other) { // In order to be consistent, we clear the top down pointers when by adding // TopDownPathCount becomes OverflowOccurredValue even though "true" overflow - // has not occured. + // has not occurred. if (TopDownPathCount == OverflowOccurredValue) { clearTopDownPointers(); return; @@ -755,7 +755,7 @@ void BBState::MergeSucc(const BBState &Other) { // In order to be consistent, we clear the top down pointers when by adding // BottomUpPathCount becomes OverflowOccurredValue even though "true" overflow - // has not occured. + // has not occurred. if (BottomUpPathCount == OverflowOccurredValue) { clearBottomUpPointers(); return; @@ -1808,13 +1808,13 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst, // pointer has multiple owners implying that we must be more conservative. // // This comes up in the context of a pointer being ``KnownSafe''. In the - // presense of a block being initialized, the frontend will emit the + // presence of a block being initialized, the frontend will emit the // objc_retain on the original pointer and the release on the pointer loaded // from the alloca. The optimizer will through the provenance analysis // realize that the two are related, but since we only require KnownSafe in // one direction, will match the inner retain on the original pointer with // the guard release on the original pointer. This is fixed by ensuring that - // in the presense of allocas we only unconditionally remove pointers if + // in the presence of allocas we only unconditionally remove pointers if // both our retain and our release are KnownSafe. if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) { diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp index b4d59fa0eaf..c89cd74c6eb 100644 --- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp +++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp @@ -109,8 +109,8 @@ namespace { bool preliminaryScreen(); /// Check if the given conditional branch is based on the comparison - /// beween a variable and zero, and if the variable is non-zero, the - /// control yeilds to the loop entry. If the branch matches the behavior, + /// between a variable and zero, and if the variable is non-zero, the + /// control yields to the loop entry. If the branch matches the behavior, /// the variable involved in the comparion is returned. This function will /// be called to see if the precondition and postcondition of the loop /// are in desirable form.
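The LoopIdiomRecognize doc comment corrected just above describes its precondition test in prose: a conditional branch compares a variable against zero, and control yields to the loop entry exactly when the variable is non-zero. A minimal standalone sketch of a check with that shape is below, written against LLVM-style APIs; matchBranchAgainstZero and its LoopEntry parameter are hypothetical names for illustration, not the pass's actual interface:

  #include "llvm/IR/Instructions.h"
  #include "llvm/Support/Casting.h"
  using namespace llvm;

  // Return the variable compared against zero if BI yields control to
  // LoopEntry exactly when that variable is non-zero; otherwise return null.
  static Value *matchBranchAgainstZero(BranchInst *BI, BasicBlock *LoopEntry) {
    if (!BI || !BI->isConditional())
      return nullptr;
    ICmpInst *Cmp = dyn_cast<ICmpInst>(BI->getCondition());
    if (!Cmp)
      return nullptr;
    ConstantInt *Zero = dyn_cast<ConstantInt>(Cmp->getOperand(1));
    if (!Zero || !Zero->isZero())
      return nullptr;
    // "icmp ne %x, 0" must branch to the loop entry on true;
    // "icmp eq %x, 0" must branch to it on false.
    if (Cmp->getPredicate() == ICmpInst::ICMP_NE)
      return BI->getSuccessor(0) == LoopEntry ? Cmp->getOperand(0) : nullptr;
    if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
      return BI->getSuccessor(1) == LoopEntry ? Cmp->getOperand(0) : nullptr;
    return nullptr;
  }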
@@ -521,7 +521,7 @@ void NclPopcountRecognize::transform(Instruction *CntInst, // TripCnt is exactly the number of iterations the loop has TripCnt = NewCount; - // If the popoulation counter's initial value is not zero, insert Add Inst. + // If the population counter's initial value is not zero, insert Add Inst. Value *CntInitVal = CntPhi->getIncomingValueForBlock(PreHead); ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal); if (!InitConst || !InitConst->isZero()) { diff --git a/lib/Transforms/Utils/FlattenCFG.cpp b/lib/Transforms/Utils/FlattenCFG.cpp index 1da226bfcbe..39c80f86b67 100644 --- a/lib/Transforms/Utils/FlattenCFG.cpp +++ b/lib/Transforms/Utils/FlattenCFG.cpp @@ -240,7 +240,7 @@ bool FlattenCFGOpt::FlattenParallelAndOr(BasicBlock *BB, IRBuilder<> &Builder, BranchInst *BI = dyn_cast<BranchInst>(CurrBlock->getTerminator()); CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition()); CmpInst::Predicate Predicate = CI->getPredicate(); - // Cannonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq + // Canonicalize icmp_ne -> icmp_eq, fcmp_one -> fcmp_oeq if ((Predicate == CmpInst::ICMP_NE) || (Predicate == CmpInst::FCMP_ONE)) { CI->setPredicate(ICmpInst::getInversePredicate(Predicate)); BI->swapSuccessors(); diff --git a/lib/Transforms/Utils/SimplifyCFG.cpp b/lib/Transforms/Utils/SimplifyCFG.cpp index a30dcf2fe0c..e43c9e2708d 100644 --- a/lib/Transforms/Utils/SimplifyCFG.cpp +++ b/lib/Transforms/Utils/SimplifyCFG.cpp @@ -62,9 +62,9 @@ static cl::opt<bool> SinkCommon("simplifycfg-sink-common", cl::Hidden, cl::init(true), cl::desc("Sink common instructions down to the end block")); -static cl::opt<bool> -HoistCondStores("simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true), - cl::desc("Hoist conditional stores if an unconditional store preceeds")); +static cl::opt<bool> HoistCondStores( "simplifycfg-hoist-cond-stores", cl::Hidden, cl::init(true), cl::desc("Hoist conditional stores if an unconditional store precedes")); STATISTIC(NumBitMaps, "Number of switch instructions turned into bitmaps"); STATISTIC(NumLookupTables, "Number of switch instructions turned into lookup tables"); diff --git a/lib/Transforms/Vectorize/LoopVectorize.cpp b/lib/Transforms/Vectorize/LoopVectorize.cpp index 695ee03ea76..892c42755b4 100644 --- a/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -2371,7 +2371,7 @@ void InnerLoopVectorizer::vectorizeLoop() { setDebugLocFromInst(Builder, RdxDesc.StartValue); // We need to generate a reduction vector from the incoming scalar. - // To do so, we need to generate the 'identity' vector and overide + // To do so, we need to generate the 'identity' vector and override // one of the elements with the incoming scalar reduction. We need // to do it in the vector-loop preheader. Builder.SetInsertPoint(LoopBypassBlocks.front()->getTerminator()); @@ -3713,8 +3713,8 @@ void AccessAnalysis::processMemAccesses(bool UseDeferred) { } bool NeedDepCheck = false; - // Check whether there is the possiblity of dependency because of underlying - // objects being the same. + // Check whether there is the possibility of dependency because of + // underlying objects being the same.
typedef SmallVector<Value*, 16> ValueVector; ValueVector TempObjects; GetUnderlyingObjects(Ptr, TempObjects, DL); diff --git a/lib/Transforms/Vectorize/SLPVectorizer.cpp b/lib/Transforms/Vectorize/SLPVectorizer.cpp index 0e2a98e6de1..80d9ffccafb 100644 --- a/lib/Transforms/Vectorize/SLPVectorizer.cpp +++ b/lib/Transforms/Vectorize/SLPVectorizer.cpp @@ -1871,7 +1871,7 @@ private: StoreListMap StoreRefs; }; -/// \brief Check that the Values in the slice in VL array are still existant in +/// \brief Check that the Values in the slice in VL array are still existent in /// the WeakVH array. /// Vectorization of part of the VL array may cause later values in the VL array /// to become invalid. We track when this has happened in the WeakVH array. @@ -2516,7 +2516,7 @@ bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) { break; } - // Start over at the next instruction of a differnt type (or the end). + // Start over at the next instruction of a different type (or the end). IncIt = SameTypeIt; } } diff --git a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll index 52e6683c9f0..66df9d19234 100644 --- a/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll +++ b/test/Analysis/ScalarEvolution/2012-05-18-LoopPredRecurse.ll @@ -4,7 +4,7 @@ ; getUDivExpr()->getZeroExtendExpr()->isLoopBackedgeGuardedBy() ; ; We actually want SCEV simplification to fail gracefully in this -; case, so there's no output to check, just the absense of stack overflow. +; case, so there's no output to check, just the absence of stack overflow. @c = common global i8 0, align 1 diff --git a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll index 0cd5c301842..e1c5d452647 100644 --- a/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll +++ b/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll @@ -43,7 +43,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i16 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %s.addr = alloca i32*, align 8 %A.addr = alloca %struct.StructA*, align 8 @@ -98,7 +98,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i16 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %A.addr = alloca %struct.StructA*, align 8 %B.addr = alloca %struct.StructB*, align 8 @@ -127,7 +127,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %A.addr = alloca %struct.StructA*, align 8 %B.addr = alloca %struct.StructB*, align 8 @@ -155,7 +155,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %A.addr = alloca %struct.StructA*, align 8 %B.addr = alloca %struct.StructB*, align 8 @@ -184,7 +184,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %A.addr = alloca %struct.StructA*, align 8 %S.addr = alloca %struct.StructS*, align 8 @@ -212,7 +212,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i16 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store.
; OPT: ret i32 1 %A.addr = alloca %struct.StructA*, align 8 %S.addr = alloca %struct.StructS*, align 8 @@ -240,7 +240,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %S.addr = alloca %struct.StructS*, align 8 %S2.addr = alloca %struct.StructS2*, align 8 @@ -268,7 +268,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i16 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %S.addr = alloca %struct.StructS*, align 8 %S2.addr = alloca %struct.StructS2*, align 8 @@ -296,7 +296,7 @@ entry: ; OPT: define ; OPT: store i32 1 ; OPT: store i32 4 -; Remove a load and propogate the value from store. +; Remove a load and propagate the value from store. ; OPT: ret i32 1 %C.addr = alloca %struct.StructC*, align 8 %D.addr = alloca %struct.StructD*, align 8 diff --git a/test/Bindings/Ocaml/vmcore.ml b/test/Bindings/Ocaml/vmcore.ml index 167efce0b2b..017ef831a47 100644 --- a/test/Bindings/Ocaml/vmcore.ml +++ b/test/Bindings/Ocaml/vmcore.ml @@ -413,7 +413,7 @@ let test_global_values () = let test_global_variables () = let (++) x f = f x; x in - let fourty_two32 = const_int i32_type 42 in + let forty_two32 = const_int i32_type 42 in group "declarations"; begin (* CHECK: @GVar01 = external global i32 @@ -444,16 +444,16 @@ let test_global_variables () = * CHECK: @QGVar02 = addrspace(3) global i32 42 * CHECK: @QGVar03 = addrspace(3) global i32 42 *) - let g = define_global "GVar02" fourty_two32 m in + let g = define_global "GVar02" forty_two32 m in let g2 = declare_global i32_type "GVar03" m ++ - set_initializer fourty_two32 in + set_initializer forty_two32 in insist (not (is_declaration g)); insist (not (is_declaration g2)); insist ((global_initializer g) == (global_initializer g2)); - let g = define_qualified_global "QGVar02" fourty_two32 3 m in + let g = define_qualified_global "QGVar02" forty_two32 3 m in let g2 = declare_qualified_global i32_type "QGVar03" 3 m ++ - set_initializer fourty_two32 in + set_initializer forty_two32 in insist (not (is_declaration g)); insist (not (is_declaration g2)); insist ((global_initializer g) == (global_initializer g2)); @@ -462,34 +462,34 @@ let test_global_variables () = (* CHECK: GVar04{{.*}}thread_local *) group "threadlocal"; - let g = define_global "GVar04" fourty_two32 m ++ + let g = define_global "GVar04" forty_two32 m ++ set_thread_local true in insist (is_thread_local g); (* CHECK: GVar05{{.*}}thread_local(initialexec) *) group "threadlocal_mode"; - let g = define_global "GVar05" fourty_two32 m ++ + let g = define_global "GVar05" forty_two32 m ++ set_thread_local_mode ThreadLocalMode.InitialExec in insist ((thread_local_mode g) = ThreadLocalMode.InitialExec); (* CHECK: GVar06{{.*}}externally_initialized *) group "externally_initialized"; - let g = define_global "GVar06" fourty_two32 m ++ + let g = define_global "GVar06" forty_two32 m ++ set_externally_initialized true in insist (is_externally_initialized g); (* CHECK-NOWHERE-NOT: GVar07 *) group "delete"; - let g = define_global "GVar07" fourty_two32 m in + let g = define_global "GVar07" forty_two32 m in delete_global g; (* CHECK: ConstGlobalVar{{.*}}constant *) group "constant"; - let g = define_global "ConstGlobalVar" fourty_two32 m in + let g = define_global "ConstGlobalVar" forty_two32 m in insist (not (is_global_constant g)); set_global_constant true g; insist (is_global_constant g); diff --git 
a/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll index 189926941eb..0a9bc3c87f9 100644 --- a/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll +++ b/test/CodeGen/ARM/struct_byval_arm_t1_t2.ll @@ -13,7 +13,7 @@ ;structs at varying alignments. Each test is run for arm, thumb2 and thumb1. ;We check for the strings in the generated object code using llvm-objdump ;because it provides better assurance that we are generating instructions -;for the correct architecture. Otherwise we could accidently generate an +;for the correct architecture. Otherwise we could accidentally generate an ;ARM instruction for THUMB1 and wouldn't detect it because the assembly ;code representation is the same, but the object code would be generated ;incorrectly. For each test we check for the label, a load instruction of the diff --git a/test/CodeGen/MSP430/misched-msp430.ll b/test/CodeGen/MSP430/misched-msp430.ll index 24ca47b2e04..c8541eff583 100644 --- a/test/CodeGen/MSP430/misched-msp430.ll +++ b/test/CodeGen/MSP430/misched-msp430.ll @@ -7,7 +7,7 @@ target datalayout = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16" ; Test that the MI Scheduler's initPolicy does not crash when i32 is ; unsupported. The content of the asm check below is unimportant. It -; only verifies that the code generator ran succesfully. +; only verifies that the code generator ran successfully. ; ; CHECK-LABEL: @f ; CHECK: mov.w &y, &x diff --git a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll index 24e27cbf14b..f25ab228060 100644 --- a/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll +++ b/test/CodeGen/Mips/msa/llvm-stress-s449609655-simplified.ll @@ -10,7 +10,7 @@ ; The legalizer legalized ; the <4 x i8>'s into <4 x i32>'s, then a call to ; isVSplat() returned the splat value as a 32-bit APInt ; (255), but the zeroinitializer splat value as an 8-bit APInt (0). The -; assertion occured when trying to check the values were bitwise inverses of +; assertion occurred when trying to check the values were bitwise inverses of ; each-other. ; ; It should at least successfully build. diff --git a/test/CodeGen/SystemZ/fp-cmp-04.ll b/test/CodeGen/SystemZ/fp-cmp-04.ll index 94f4b7cb370..781a3beb4d4 100644 --- a/test/CodeGen/SystemZ/fp-cmp-04.ll +++ b/test/CodeGen/SystemZ/fp-cmp-04.ll @@ -1,4 +1,4 @@ -; Test that floating-point compares are ommitted if CC already has the +; Test that floating-point compares are omitted if CC already has the ; right value. ; ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s diff --git a/test/CodeGen/SystemZ/frame-08.ll b/test/CodeGen/SystemZ/frame-08.ll index da2a6142fb4..aa4e3f481da 100644 --- a/test/CodeGen/SystemZ/frame-08.ll +++ b/test/CodeGen/SystemZ/frame-08.ll @@ -208,7 +208,7 @@ define void @f4(i32 *%ptr, i64 %x) { ret void } -; This is the largest frame size for which the prepatory increment for +; This is the largest frame size for which the preparatory increment for ; "lmg %r14, %r15, ..." can be done using AGHI. define void @f5(i32 *%ptr, i64 %x) { ; CHECK-LABEL: f5: @@ -242,7 +242,7 @@ define void @f5(i32 *%ptr, i64 %x) { ret void } -; This is the smallest frame size for which the prepatory increment for +; This is the smallest frame size for which the preparatory increment for ; "lmg %r14, %r15, ..." needs to be done using AGFI.
define void @f6(i32 *%ptr, i64 %x) { ; CHECK-LABEL: f6: diff --git a/test/CodeGen/SystemZ/int-cmp-44.ll b/test/CodeGen/SystemZ/int-cmp-44.ll index 822dcac3059..f065e642129 100644 --- a/test/CodeGen/SystemZ/int-cmp-44.ll +++ b/test/CodeGen/SystemZ/int-cmp-44.ll @@ -1,4 +1,4 @@ -; Test that compares are ommitted if CC already has the right value +; Test that compares are omitted if CC already has the right value ; (z10 version). ; ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | FileCheck %s diff --git a/test/CodeGen/SystemZ/int-cmp-45.ll b/test/CodeGen/SystemZ/int-cmp-45.ll index 753a528e46c..9c9c49c05df 100644 --- a/test/CodeGen/SystemZ/int-cmp-45.ll +++ b/test/CodeGen/SystemZ/int-cmp-45.ll @@ -1,4 +1,4 @@ -; Test that compares are ommitted if CC already has the right value +; Test that compares are omitted if CC already has the right value ; (z196 version). ; ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s diff --git a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll index 3455b68fb0e..bbba796eed2 100644 --- a/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll +++ b/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll @@ -3,7 +3,7 @@ ; During X86 fastisel, the address of indirect call was resolved ; through bitcast, ptrtoint, and inttoptr instructions. This is valid ; only if the related instructions are in that same basic block, otherwise -; we may reference variables that were not live accross basic blocks +; we may reference variables that were not live across basic blocks ; resulting in undefined virtual registers. ; ; In this example, this is illustrated by a the spill/reload of the @@ -25,7 +25,7 @@ ; CHECK: movq [[ARG2_SLOT]], %rdi ; Load the second argument ; CHECK: movq [[ARG2_SLOT]], %rsi -; Load the thrid argument +; Load the third argument ; CHECK: movq [[ARG2_SLOT]], %rdx ; Load the function pointer. ; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]] @@ -64,7 +64,7 @@ label_end: ; CHECK: movq [[ARG2_SLOT]], %rdi ; Load the second argument ; CHECK: movq [[ARG2_SLOT]], %rsi -; Load the thrid argument +; Load the third argument ; CHECK: movq [[ARG2_SLOT]], %rdx ; Load the function pointer. ; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]] @@ -103,7 +103,7 @@ label_end: ; CHECK: movq [[ARG2_SLOT]], %rdi ; Load the second argument ; CHECK: movq [[ARG2_SLOT]], %rsi -; Load the thrid argument +; Load the third argument ; CHECK: movq [[ARG2_SLOT]], %rdx ; Load the function pointer. ; CHECK: movq [[LOADED_PTR_SLOT]], [[FCT_PTR:%[a-z]+]] diff --git a/test/CodeGen/X86/block-placement.ll b/test/CodeGen/X86/block-placement.ll index d3e05d6fbed..2681c109ef5 100644 --- a/test/CodeGen/X86/block-placement.ll +++ b/test/CodeGen/X86/block-placement.ll @@ -701,7 +701,7 @@ exit: define void @unanalyzable_branch_to_best_succ(i1 %cond) { ; Ensure that we can handle unanalyzable branches where the destination block -; gets selected as the optimal sucessor to merge. +; gets selected as the optimal successor to merge. 
; ; CHECK: unanalyzable_branch_to_best_succ ; CHECK: %entry diff --git a/test/CodeGen/X86/load-slice.ll b/test/CodeGen/X86/load-slice.ll index 85fd7f03ef6..49eb13160bb 100644 --- a/test/CodeGen/X86/load-slice.ll +++ b/test/CodeGen/X86/load-slice.ll @@ -6,7 +6,7 @@ %class.Complex = type { float, float } -; Check that independant slices leads to independant loads then the slices leads to +; Check that independent slices lead to independent loads then the slices lead to ; different register file. ; ; The layout is: diff --git a/test/CodeGen/X86/shl_undef.ll b/test/CodeGen/X86/shl_undef.ll index 666c9728106..705af5b4e33 100644 --- a/test/CodeGen/X86/shl_undef.ll +++ b/test/CodeGen/X86/shl_undef.ll @@ -4,7 +4,7 @@ ; %tmp1676 = xor i32 %tmp1634, %tmp1530 have zero demanded bits after ; DAGCombiner optimization pass. These are changed to undef and in turn ; the successor shl(s) become shl undef, 1. This pattern then matches -; shl x, 1 -> add x, x. add undef, undef doesn't guarentee the low +; shl x, 1 -> add x, x. add undef, undef doesn't guarantee the low ; order bit is zero and is incorrect. ; ; See rdar://9453156 and rdar://9487392. diff --git a/test/DebugInfo/X86/tls.ll b/test/DebugInfo/X86/tls.ll index 745c2f40f67..7cf630184e2 100644 --- a/test/DebugInfo/X86/tls.ll +++ b/test/DebugInfo/X86/tls.ll @@ -6,7 +6,7 @@ ; 10 bytes of data in this DW_FORM_block1 representation of the location of 'tls' ; CHECK: .byte 10{{ *}}# DW_AT_location -; DW_OP_const8u (0x0e == 14) of adress +; DW_OP_const8u (0x0e == 14) of address ; CHECK: .byte 14 ; The debug relocation of the address of the tls variable ; CHECK: .quad tls@DTPOFF diff --git a/test/MC/ARM/eh-directive-setfp.s b/test/MC/ARM/eh-directive-setfp.s index dfa79e622d2..ce7fe10b43c 100644 --- a/test/MC/ARM/eh-directive-setfp.s +++ b/test/MC/ARM/eh-directive-setfp.s @@ -9,7 +9,7 @@ @ then libunwind will reconstruct the stack pointer from the frame pointer. @ The reconstruction code is implemented by two different unwind opcode: @ (i) the unwind opcode to copy stack offset from the other register, and -@ (ii) the unwind opcode to add or substract the stack offset. +@ (ii) the unwind opcode to add or subtract the stack offset. @ @ This file includes several cases separated by different range of -offset @ diff --git a/test/MC/ARM/elf-thumbfunc-reloc.ll b/test/MC/ARM/elf-thumbfunc-reloc.ll index 9fd360e1a01..d869c22906c 100644 --- a/test/MC/ARM/elf-thumbfunc-reloc.ll +++ b/test/MC/ARM/elf-thumbfunc-reloc.ll @@ -3,7 +3,7 @@ ; RUN: FileCheck %s ; FIXME: This file needs to be in .s form! -; We wanna test relocatable thumb function call, +; We want to test relocatable thumb function call, ; but ARMAsmParser cannot handle "bl foo(PLT)" yet target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:32-n32" diff --git a/test/MC/COFF/bss.s b/test/MC/COFF/bss.s index 86294c18683..17ae0daa1c3 100644 --- a/test/MC/COFF/bss.s +++ b/test/MC/COFF/bss.s @@ -1,4 +1,4 @@ -// The purpose of this test is to verify that bss sections are emited correctly. +// The purpose of this test is to verify that bss sections are emitted correctly.
// RUN: llvm-mc -filetype=obj -triple i686-pc-win32 %s | llvm-readobj -s | FileCheck %s // RUN: llvm-mc -filetype=obj -triple x86_64-pc-win32 %s | llvm-readobj -s | FileCheck %s diff --git a/test/MC/COFF/section-name-encoding.s b/test/MC/COFF/section-name-encoding.s index 0f531f39756..c41a81eb1c8 100644 --- a/test/MC/COFF/section-name-encoding.s +++ b/test/MC/COFF/section-name-encoding.s @@ -52,7 +52,7 @@ pad_sections aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa // /1000029 == 4 + 10 + (5 * (2 + (20 * 10 * 1000) + 1)) // v | | v ~~~~~~~~~~~~~~ v -// table size v v "p0" pad NUL seperator +// table size v v "p0" pad NUL separator // "s12345678\0" # of pad sections // // CHECK: Section { diff --git a/test/MC/MachO/bss.s b/test/MC/MachO/bss.s index 15d490ad5eb..c5afe107782 100644 --- a/test/MC/MachO/bss.s +++ b/test/MC/MachO/bss.s @@ -1,4 +1,4 @@ -// The purpose of this test is to verify that bss sections are emited correctly. +// The purpose of this test is to verify that bss sections are emitted correctly. // RUN: llvm-mc -filetype=obj -triple i686-apple-darwin9 %s | llvm-readobj -s | FileCheck %s // RUN: llvm-mc -filetype=obj -triple x86_64-apple-darwin9 %s | llvm-readobj -s | FileCheck %s diff --git a/test/Transforms/Inline/ptr-diff.ll b/test/Transforms/Inline/ptr-diff.ll index af42bc7cede..46c3bcd4dc5 100644 --- a/test/Transforms/Inline/ptr-diff.ll +++ b/test/Transforms/Inline/ptr-diff.ll @@ -31,7 +31,7 @@ else: define i32 @outer2(i32* %ptr) { ; Test that an inbounds GEP disables this -- it isn't safe in general as -; wrapping changes the behavior of lessthan and greaterthan comparisions. +; wrapping changes the behavior of lessthan and greaterthan comparisons. ; CHECK-LABEL: @outer2( ; CHECK: call i32 @inner2 ; CHECK: ret i32 diff --git a/test/Transforms/LoopVectorize/value-ptr-bug.ll b/test/Transforms/LoopVectorize/value-ptr-bug.ll index e8d37285f80..6b06afaf0de 100644 --- a/test/Transforms/LoopVectorize/value-ptr-bug.ll +++ b/test/Transforms/LoopVectorize/value-ptr-bug.ll @@ -4,7 +4,7 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3 ; PR16073 -; Because we were caching value pointers accross a function call that could RAUW +; Because we were caching value pointers across a function call that could RAUW ; we would generate an undefined value store below: ; SCEVExpander::expandCodeFor would change a value (the start value of an ; induction) that we cached in the induction variable list. diff --git a/test/Transforms/ObjCARC/allocas.ll b/test/Transforms/ObjCARC/allocas.ll index 50656739ae7..7347a8fd444 100644 --- a/test/Transforms/ObjCARC/allocas.ll +++ b/test/Transforms/ObjCARC/allocas.ll @@ -28,7 +28,7 @@ declare void @llvm.dbg.value(metadata, i64, metadata) declare i8* @objc_msgSend(i8*, i8*, ...) -; In the presense of allocas, unconditionally remove retain/release pairs only +; In the presence of allocas, unconditionally remove retain/release pairs only ; if they are known safe in both directions. This prevents matching up an inner ; retain with the boundary guarding release in the following situation: ; @@ -336,7 +336,7 @@ bb3: ret void } -; Make sure in the presense of allocas, if we find a cfghazard we do not perform +; Make sure in the presence of allocas, if we find a cfghazard we do not perform ; code motion even if we are known safe. These two concepts are separate and ; should be treated as such. 
; diff --git a/tools/lli/RemoteMemoryManager.cpp b/tools/lli/RemoteMemoryManager.cpp index c9d426a4e68..e9f4d53ff9e 100644 --- a/tools/lli/RemoteMemoryManager.cpp +++ b/tools/lli/RemoteMemoryManager.cpp @@ -109,7 +109,7 @@ void RemoteMemoryManager::notifyObjectLoaded(ExecutionEngine *EE, CurOffset += Size; } } - // Adjust to keep code and data aligned on seperate pages. + // Adjust to keep code and data aligned on separate pages. CurOffset = (CurOffset + MaxAlign - 1) / MaxAlign * MaxAlign; for (size_t i = 0, e = NumSections; i != e; ++i) { Allocation &Section = UnmappedSections[i]; diff --git a/tools/lli/RemoteTarget.h b/tools/lli/RemoteTarget.h index 9803589eccf..73e8ae2284d 100644 --- a/tools/lli/RemoteTarget.h +++ b/tools/lli/RemoteTarget.h @@ -96,7 +96,7 @@ public: virtual bool executeCode(uint64_t Address, int &RetVal); - /// Minimum alignment for memory permissions. Used to seperate code and + /// Minimum alignment for memory permissions. Used to separate code and /// data regions to make sure data doesn't get marked as code or vice /// versa. /// diff --git a/tools/lli/RemoteTargetExternal.h b/tools/lli/RemoteTargetExternal.h index 17218a8c238..587849b9069 100644 --- a/tools/lli/RemoteTargetExternal.h +++ b/tools/lli/RemoteTargetExternal.h @@ -81,7 +81,7 @@ public: /// descriptive text of the encountered error. virtual bool executeCode(uint64_t Address, int &RetVal); - /// Minimum alignment for memory permissions. Used to seperate code and + /// Minimum alignment for memory permissions. Used to separate code and /// data regions to make sure data doesn't get marked as code or vice /// versa. /// diff --git a/tools/llvm-objdump/COFFDump.cpp b/tools/llvm-objdump/COFFDump.cpp index 106dcba37aa..7b60a5d4dcc 100644 --- a/tools/llvm-objdump/COFFDump.cpp +++ b/tools/llvm-objdump/COFFDump.cpp @@ -10,7 +10,7 @@ /// \file /// \brief This file implements the COFF-specific dumper for llvm-objdump. /// It outputs the Win64 EH data structures as plain text. -/// The encoding of the unwind codes is decribed in MSDN: +/// The encoding of the unwind codes is described in MSDN: /// http://msdn.microsoft.com/en-us/library/ck9asaa9.aspx /// //===----------------------------------------------------------------------===// diff --git a/unittests/Support/FileOutputBufferTest.cpp b/unittests/Support/FileOutputBufferTest.cpp index b81bdb579dd..6d62999ab87 100644 --- a/unittests/Support/FileOutputBufferTest.cpp +++ b/unittests/Support/FileOutputBufferTest.cpp @@ -68,7 +68,7 @@ TEST(FileOutputBuffer, Test) { memcpy(Buffer2->getBufferStart(), "AABBCCDDEEFFGGHHIIJJ", 20); // Do *not* commit buffer. } - // Verify file does not exist (because buffer not commited). + // Verify file does not exist (because buffer not committed). bool Exists = false; ASSERT_NO_ERROR(fs::exists(Twine(File2), Exists)); EXPECT_FALSE(Exists); diff --git a/unittests/Support/SwapByteOrderTest.cpp b/unittests/Support/SwapByteOrderTest.cpp index c2a0c279388..85ac6f3e8dd 100644 --- a/unittests/Support/SwapByteOrderTest.cpp +++ b/unittests/Support/SwapByteOrderTest.cpp @@ -17,7 +17,7 @@ using namespace llvm; namespace { -// In these first two tests all of the origional_uintx values are truncated +// In these first two tests all of the original_uintx values are truncated // except for 64. We could avoid this, but there's really no point. TEST(SwapByteOrder, UnsignedRoundTrip) { @@ -25,21 +25,21 @@ TEST(SwapByteOrder, UnsignedRoundTrip) { // in every byte. 
uint64_t value = 1; for (std::size_t i = 0; i <= sizeof(value); ++i) { - uint8_t origional_uint8 = static_cast<uint8_t>(value); - EXPECT_EQ(origional_uint8, - sys::SwapByteOrder(sys::SwapByteOrder(origional_uint8))); + uint8_t original_uint8 = static_cast<uint8_t>(value); + EXPECT_EQ(original_uint8, + sys::SwapByteOrder(sys::SwapByteOrder(original_uint8))); - uint16_t origional_uint16 = static_cast<uint16_t>(value); - EXPECT_EQ(origional_uint16, - sys::SwapByteOrder(sys::SwapByteOrder(origional_uint16))); + uint16_t original_uint16 = static_cast<uint16_t>(value); + EXPECT_EQ(original_uint16, + sys::SwapByteOrder(sys::SwapByteOrder(original_uint16))); - uint32_t origional_uint32 = static_cast<uint32_t>(value); - EXPECT_EQ(origional_uint32, - sys::SwapByteOrder(sys::SwapByteOrder(origional_uint32))); + uint32_t original_uint32 = static_cast<uint32_t>(value); + EXPECT_EQ(original_uint32, + sys::SwapByteOrder(sys::SwapByteOrder(original_uint32))); - uint64_t origional_uint64 = static_cast<uint64_t>(value); - EXPECT_EQ(origional_uint64, - sys::SwapByteOrder(sys::SwapByteOrder(origional_uint64))); + uint64_t original_uint64 = static_cast<uint64_t>(value); + EXPECT_EQ(original_uint64, + sys::SwapByteOrder(sys::SwapByteOrder(original_uint64))); value = (value << 8) | 0x55; // binary 0101 0101. } @@ -50,40 +50,40 @@ TEST(SwapByteOrder, SignedRoundTrip) { // in every byte. uint64_t value = 1; for (std::size_t i = 0; i <= sizeof(value); ++i) { - int8_t origional_int8 = static_cast<int8_t>(value); - EXPECT_EQ(origional_int8, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int8))); + int8_t original_int8 = static_cast<int8_t>(value); + EXPECT_EQ(original_int8, + sys::SwapByteOrder(sys::SwapByteOrder(original_int8))); - int16_t origional_int16 = static_cast<int16_t>(value); - EXPECT_EQ(origional_int16, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int16))); + int16_t original_int16 = static_cast<int16_t>(value); + EXPECT_EQ(original_int16, + sys::SwapByteOrder(sys::SwapByteOrder(original_int16))); - int32_t origional_int32 = static_cast<int32_t>(value); - EXPECT_EQ(origional_int32, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int32))); + int32_t original_int32 = static_cast<int32_t>(value); + EXPECT_EQ(original_int32, + sys::SwapByteOrder(sys::SwapByteOrder(original_int32))); - int64_t origional_int64 = static_cast<int64_t>(value); - EXPECT_EQ(origional_int64, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int64))); + int64_t original_int64 = static_cast<int64_t>(value); + EXPECT_EQ(original_int64, + sys::SwapByteOrder(sys::SwapByteOrder(original_int64))); // Test other sign. value *= -1; - origional_int8 = static_cast<int8_t>(value); - EXPECT_EQ(origional_int8, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int8))); + original_int8 = static_cast<int8_t>(value); + EXPECT_EQ(original_int8, + sys::SwapByteOrder(sys::SwapByteOrder(original_int8))); - origional_int16 = static_cast<int16_t>(value); - EXPECT_EQ(origional_int16, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int16))); + original_int16 = static_cast<int16_t>(value); + EXPECT_EQ(original_int16, + sys::SwapByteOrder(sys::SwapByteOrder(original_int16))); - origional_int32 = static_cast<int32_t>(value); - EXPECT_EQ(origional_int32, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int32))); + original_int32 = static_cast<int32_t>(value); + EXPECT_EQ(original_int32, + sys::SwapByteOrder(sys::SwapByteOrder(original_int32))); - origional_int64 = static_cast<int64_t>(value); - EXPECT_EQ(origional_int64, - sys::SwapByteOrder(sys::SwapByteOrder(origional_int64))); + original_int64 = static_cast<int64_t>(value); + EXPECT_EQ(original_int64, + sys::SwapByteOrder(sys::SwapByteOrder(original_int64))); // Return to normal sign and twiddle.
value *= -1; diff --git a/utils/TableGen/CodeGenRegisters.h b/utils/TableGen/CodeGenRegisters.h index 37f75b4a486..a44d8b083e5 100644 --- a/utils/TableGen/CodeGenRegisters.h +++ b/utils/TableGen/CodeGenRegisters.h @@ -335,7 +335,7 @@ namespace llvm { // getSubClasses - Returns a constant BitVector of subclasses indexed by // EnumValue. - // The SubClasses vector includs an entry for this class. + // The SubClasses vector includes an entry for this class. const BitVector &getSubClasses() const { return SubClasses; } // getSuperClasses - Returns a list of super classes ordered by EnumValue. diff --git a/utils/TableGen/CodeGenSchedule.cpp b/utils/TableGen/CodeGenSchedule.cpp index dd06433d6ab..39af61f7564 100644 --- a/utils/TableGen/CodeGenSchedule.cpp +++ b/utils/TableGen/CodeGenSchedule.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file defines structures to encapsulate the machine model as decribed in +// This file defines structures to encapsulate the machine model as described in // the target description. // //===----------------------------------------------------------------------===// diff --git a/utils/TableGen/CodeGenSchedule.h b/utils/TableGen/CodeGenSchedule.h index fa964cf23d2..5ce679a2593 100644 --- a/utils/TableGen/CodeGenSchedule.h +++ b/utils/TableGen/CodeGenSchedule.h @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This file defines structures to encapsulate the machine model as decribed in +// This file defines structures to encapsulate the machine model as described in // the target description. // //===----------------------------------------------------------------------===// @@ -162,7 +162,7 @@ struct CodeGenSchedClass { // ModelName is a unique name used to name an instantiation of MCSchedModel. // // ModelDef is NULL for inferred Models. This happens when a processor defines -// an itinerary but no machine model. If the processer defines neither a machine +// an itinerary but no machine model. If the processor defines neither a machine // model nor itinerary, then ModelDef remains pointing to NoModel. NoModel has // the special "NoModel" field set to true. // diff --git a/utils/wciia.py b/utils/wciia.py index c838819ebe4..eaa232f84af 100755 --- a/utils/wciia.py +++ b/utils/wciia.py @@ -110,7 +110,7 @@ if not os.path.exists(path): owners_name = find_owners(path) -# be gramatically correct +# be grammatically correct print "The owner(s) of the (" + path + ") is(are) : " + str(owners_name) exit(0) -- cgit v1.2.3
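The SwapByteOrderTest hunks above all assert one round-trip property: byte-swapping a value twice is the identity, at every signed and unsigned width. A self-contained sketch of the same check is below; it uses a local swap32 helper rather than llvm::sys::SwapByteOrder so it builds without LLVM, and it mirrors the test's trick of shifting 0x55 into every byte:

  #include <cassert>
  #include <cstdint>

  // Reverse the bytes of a 32-bit value.
  static uint32_t swap32(uint32_t v) {
    return (v >> 24) | ((v >> 8) & 0xFF00u) | ((v << 8) & 0xFF0000u) | (v << 24);
  }

  int main() {
    uint64_t value = 1;
    for (int i = 0; i <= 4; ++i) {
      uint32_t original = static_cast<uint32_t>(value);
      // The round trip must be the identity regardless of the byte pattern.
      assert(original == swap32(swap32(original)));
      value = (value << 8) | 0x55; // binary 0101 0101, as in the test above.
    }
    return 0;
  }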