path: root/js/src/jit/StupidAllocator.cpp
blob: 8e3ea628650d9cc4d892d050a713ddeb66f81c8d
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/StupidAllocator.h"

#include "jstypes.h"

using namespace js;
using namespace js::jit;

static inline uint32_t
DefaultStackSlot(uint32_t vreg)
{
    // On x86/x64, we have to keep the stack aligned on 16 bytes for spilling
    // SIMD registers.  To avoid complexity in this stupid allocator, we just
    // allocate a 16-byte stack slot for every vreg.
    return vreg * 2 * sizeof(Value);
}

LAllocation*
StupidAllocator::stackLocation(uint32_t vreg)
{
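    // Return the canonical spill location for vreg: definitions fixed to an
    // argument slot reuse that slot, while every other vreg gets the 16-byte
    // slot computed by DefaultStackSlot.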
    LDefinition* def = virtualRegisters[vreg];
    if (def->policy() == LDefinition::FIXED && def->output()->isArgument())
        return def->output();

    return new(alloc()) LStackSlot(DefaultStackSlot(vreg));
}

StupidAllocator::RegisterIndex
StupidAllocator::registerIndex(AnyRegister reg)
{
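    // Map a physical register to its index in registers[]; the register must
    // be one of the tracked allocatable registers.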
    for (size_t i = 0; i < registerCount; i++) {
        if (reg == registers[i].reg)
            return i;
    }
    MOZ_CRASH("Bad register");
}

bool
StupidAllocator::init()
{
    if (!RegisterAllocator::init())
        return false;

    if (!virtualRegisters.appendN((LDefinition*)nullptr, graph.numVirtualRegisters()))
        return false;

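    // Build the vreg -> defining LDefinition table by walking every
    // instruction definition, temp, and phi in the graph.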
    for (size_t i = 0; i < graph.numBlocks(); i++) {
        LBlock* block = graph.getBlock(i);
        for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (size_t j = 0; j < ins->numDefs(); j++) {
                LDefinition* def = ins->getDef(j);
                virtualRegisters[def->virtualRegister()] = def;
            }

            for (size_t j = 0; j < ins->numTemps(); j++) {
                LDefinition* def = ins->getTemp(j);
                if (def->isBogusTemp())
                    continue;
                virtualRegisters[def->virtualRegister()] = def;
            }
        }
        for (size_t j = 0; j < block->numPhis(); j++) {
            LPhi* phi = block->getPhi(j);
            LDefinition* def = phi->getDef(0);
            uint32_t vreg = def->virtualRegister();

            virtualRegisters[vreg] = def;
        }
    }

    // Record the physical registers available for allocation: all allocatable
    // general purpose registers first, then all allocatable float registers.
    {
        registerCount = 0;
        LiveRegisterSet remainingRegisters(allRegisters_.asLiveSet());
        while (!remainingRegisters.emptyGeneral())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyGeneral());

        while (!remainingRegisters.emptyFloat())
            registers[registerCount++].reg = AnyRegister(remainingRegisters.takeAnyFloat());

        MOZ_ASSERT(registerCount <= MAX_REGISTERS);
    }

    return true;
}

bool
StupidAllocator::allocationRequiresRegister(const LAllocation* alloc, AnyRegister reg)
{
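    // Return whether alloc requires reg: either alloc is exactly that
    // physical register already, or it is a FIXED use whose required register
    // aliases reg.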
    if (alloc->isRegister() && alloc->toRegister() == reg)
        return true;
    if (alloc->isUse()) {
        const LUse* use = alloc->toUse();
        if (use->policy() == LUse::FIXED) {
            AnyRegister usedReg = GetFixedRegister(virtualRegisters[use->virtualRegister()], use);
            if (usedReg.aliases(reg))
                return true;
        }
    }
    return false;
}

bool
StupidAllocator::registerIsReserved(LInstruction* ins, AnyRegister reg)
{
    // Return whether reg is already reserved for an input, temp, or output of ins.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (allocationRequiresRegister(*alloc, reg))
            return true;
    }
    for (size_t i = 0; i < ins->numTemps(); i++) {
        if (allocationRequiresRegister(ins->getTemp(i)->output(), reg))
            return true;
    }
    for (size_t i = 0; i < ins->numDefs(); i++) {
        if (allocationRequiresRegister(ins->getDef(i)->output(), reg))
            return true;
    }
    return false;
}

AnyRegister
StupidAllocator::ensureHasRegister(LInstruction* ins, uint32_t vreg)
{
    // Ensure that vreg is held in a register before ins.

    // Check if the virtual register is already held in a physical register.
    RegisterIndex existing = findExistingRegister(vreg);
    if (existing != UINT32_MAX) {
        if (registerIsReserved(ins, registers[existing].reg)) {
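            // The register holding vreg is needed by ins itself; spill the
            // value so it can be reloaded into a different register below.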
            evictAliasedRegister(ins, existing);
        } else {
            registers[existing].age = ins->id();
            return registers[existing].reg;
        }
    }

    RegisterIndex best = allocateRegister(ins, vreg);
    loadRegister(ins, vreg, best, virtualRegisters[vreg]->type());

    return registers[best].reg;
}

StupidAllocator::RegisterIndex
StupidAllocator::allocateRegister(LInstruction* ins, uint32_t vreg)
{
    // Pick a register for vreg, evicting an existing register if necessary.
    // Spill code will be placed before ins, and no existing allocated input
    // for ins will be touched.
    MOZ_ASSERT(ins);

    LDefinition* def = virtualRegisters[vreg];
    MOZ_ASSERT(def);

    RegisterIndex best = UINT32_MAX;

    for (size_t i = 0; i < registerCount; i++) {
        AnyRegister reg = registers[i].reg;

        if (!def->isCompatibleReg(reg))
            continue;

        // Skip the register if it is in use for an allocated input or output.
        if (registerIsReserved(ins, reg))
            continue;

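        // Prefer a register with no current allocation; otherwise pick the
        // least recently used compatible register (smallest age).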
        if (registers[i].vreg == MISSING_ALLOCATION ||
            best == UINT32_MAX ||
            registers[best].age > registers[i].age)
        {
            best = i;
        }
    }

    evictAliasedRegister(ins, best);
    return best;
}

void
StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index)
{
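    // If the register holds a value that has not been written back yet, store
    // it to the owning vreg's canonical stack slot before ins.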
    if (registers[index].dirty) {
        LMoveGroup* input = getInputMoveGroup(ins);
        LAllocation source(registers[index].reg);

        uint32_t existing = registers[index].vreg;
        LAllocation* dest = stackLocation(existing);
        input->addAfter(source, *dest, registers[index].type);

        registers[index].dirty = false;
    }
}

void
StupidAllocator::evictRegister(LInstruction* ins, RegisterIndex index)
{
    syncRegister(ins, index);
    registers[index].set(MISSING_ALLOCATION);
}

void
StupidAllocator::evictAliasedRegister(LInstruction* ins, RegisterIndex index)
{
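    // Sync and free not just this register but every register it aliases
    // (e.g. overlapping single/double float registers on some targets), since
    // the new value will clobber them all.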
    for (size_t i = 0; i < registers[index].reg.numAliased(); i++) {
        uint32_t aindex = registerIndex(registers[index].reg.aliased(i));
        syncRegister(ins, aindex);
        registers[aindex].set(MISSING_ALLOCATION);
    }
}

void
StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex index, LDefinition::Type type)
{
    // Load a vreg from its stack location to a register.
    LMoveGroup* input = getInputMoveGroup(ins);
    LAllocation* source = stackLocation(vreg);
    LAllocation dest(registers[index].reg);
    input->addAfter(*source, dest, type);
    registers[index].set(vreg, ins);
    registers[index].type = type;
}

StupidAllocator::RegisterIndex
StupidAllocator::findExistingRegister(uint32_t vreg)
{
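    // Return the index of the register currently holding vreg, or UINT32_MAX
    // if vreg is not held in any register.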
    for (size_t i = 0; i < registerCount; i++) {
        if (registers[i].vreg == vreg)
            return i;
    }
    return UINT32_MAX;
}

bool
StupidAllocator::go()
{
    // This register allocator is intended to be as simple as possible, while
    // still being complicated enough to share properties with more advanced
    // allocators. Namely, physical registers may be used to carry virtual
    // registers across LIR instructions, but not across basic blocks.
    //
    // This algorithm does not pay any attention to liveness. It is performed
    // as a single forward pass through the basic blocks in the program. As
    // virtual registers and temporaries are defined they are assigned physical
    // registers, evicting existing allocations in an LRU fashion.

    // For virtual registers not carried in a register, a canonical spill
    // location is used. Each vreg has a different spill location; since we do
    // not track liveness we cannot determine that two vregs have disjoint
    // lifetimes. Thus, the maximum stack height is proportional to the number
    // of vregs: each vreg gets its own 16-byte slot (see DefaultStackSlot).
    graph.setLocalSlotCount(DefaultStackSlot(graph.numVirtualRegisters()));

    if (!init())
        return false;

    for (size_t blockIndex = 0; blockIndex < graph.numBlocks(); blockIndex++) {
        LBlock* block = graph.getBlock(blockIndex);
        MOZ_ASSERT(block->mir()->id() == blockIndex);

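        // Values never travel between blocks in registers; forget all
        // register contents at the start of each block.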
        for (size_t i = 0; i < registerCount; i++)
            registers[i].set(MISSING_ALLOCATION);

        for (LInstructionIterator iter = block->begin(); iter != block->end(); iter++) {
            LInstruction* ins = *iter;

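            // The last instruction of the block is its terminator: flush
            // registers and insert phi moves before allocating for it, so
            // that block-to-block dataflow goes through the stack slots.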
            if (ins == *block->rbegin())
                syncForBlockEnd(block, ins);

            allocateForInstruction(ins);
        }
    }

    return true;
}

void
StupidAllocator::syncForBlockEnd(LBlock* block, LInstruction* ins)
{
    // Sync any dirty registers, and update the synced state for phi nodes at
    // each successor of a block. We cannot conflate the storage for phis with
    // that of their inputs, as we cannot prove the live ranges of the phi and
    // its input do not overlap. The values for the two may additionally be
    // different, as the phi could be for the value of the input in a previous
    // loop iteration.

    for (size_t i = 0; i < registerCount; i++)
        syncRegister(ins, i);

    LMoveGroup* group = nullptr;

    MBasicBlock* successor = block->mir()->successorWithPhis();
    if (successor) {
        uint32_t position = block->mir()->positionInPhiSuccessor();
        LBlock* lirsuccessor = successor->lir();
        for (size_t i = 0; i < lirsuccessor->numPhis(); i++) {
            LPhi* phi = lirsuccessor->getPhi(i);

            uint32_t sourcevreg = phi->getOperand(position)->toUse()->virtualRegister();
            uint32_t destvreg = phi->getDef(0)->virtualRegister();

            if (sourcevreg == destvreg)
                continue;

            LAllocation* source = stackLocation(sourcevreg);
            LAllocation* dest = stackLocation(destvreg);

            if (!group) {
                // The moves we insert here need to happen simultaneously with
                // each other, yet after any existing moves before the instruction.
                LMoveGroup* input = getInputMoveGroup(ins);
                if (input->numMoves() == 0) {
                    group = input;
                } else {
                    group = LMoveGroup::New(alloc());
                    block->insertAfter(input, group);
                }
            }

            group->add(*source, *dest, phi->getDef(0)->type());
        }
    }
}

void
StupidAllocator::allocateForInstruction(LInstruction* ins)
{
    // Sync all registers before making a call: the call clobbers every
    // register, so any dirty value must be in its stack slot beforehand.
    if (ins->isCall()) {
        for (size_t i = 0; i < registerCount; i++)
            syncRegister(ins, i);
    }

    // Allocate for inputs which are required to be in registers.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (!alloc->isUse())
            continue;
        LUse* use = alloc->toUse();
        uint32_t vreg = use->virtualRegister();
        if (use->policy() == LUse::REGISTER) {
            AnyRegister reg = ensureHasRegister(ins, vreg);
            alloc.replace(LAllocation(reg));
        } else if (use->policy() == LUse::FIXED) {
            AnyRegister reg = GetFixedRegister(virtualRegisters[vreg], use);
            RegisterIndex index = registerIndex(reg);
            if (registers[index].vreg != vreg) {
                // The required fixed register (and anything aliasing it) may
                // hold other vregs; sync and free all of those first.
                evictAliasedRegister(ins, registerIndex(reg));
                // If this vreg is currently cached in some other register,
                // evict that stale copy as well before reloading.
                RegisterIndex existing = findExistingRegister(vreg);
                if (existing != UINT32_MAX)
                    evictRegister(ins, existing);
                loadRegister(ins, vreg, index, virtualRegisters[vreg]->type());
            }
            alloc.replace(LAllocation(reg));
        } else {
            // Inputs which are not required to be in a register are not
            // allocated until after temps/definitions, as the latter may need
            // to evict registers which hold these inputs.
        }
    }

    // Find registers to hold all temporaries and outputs of the instruction.
    for (size_t i = 0; i < ins->numTemps(); i++) {
        LDefinition* def = ins->getTemp(i);
        if (!def->isBogusTemp())
            allocateForDefinition(ins, def);
    }
    for (size_t i = 0; i < ins->numDefs(); i++) {
        LDefinition* def = ins->getDef(i);
        allocateForDefinition(ins, def);
    }

    // Allocate for remaining inputs which do not need to be in registers.
    for (LInstruction::InputIterator alloc(*ins); alloc.more(); alloc.next()) {
        if (!alloc->isUse())
            continue;
        LUse* use = alloc->toUse();
        uint32_t vreg = use->virtualRegister();
        MOZ_ASSERT(use->policy() != LUse::REGISTER && use->policy() != LUse::FIXED);

        RegisterIndex index = findExistingRegister(vreg);
        if (index == UINT32_MAX) {
            LAllocation* stack = stackLocation(use->virtualRegister());
            alloc.replace(*stack);
        } else {
            registers[index].age = ins->id();
            alloc.replace(LAllocation(registers[index].reg));
        }
    }

    // If this is a call, evict all registers except for those holding outputs.
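    // Output registers were marked dirty when allocated just above; registers
    // that are still clean only cache values that were already synced before
    // the call and can simply be forgotten.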
    if (ins->isCall()) {
        for (size_t i = 0; i < registerCount; i++) {
            if (!registers[i].dirty)
                registers[i].set(MISSING_ALLOCATION);
        }
    }
}

void
StupidAllocator::allocateForDefinition(LInstruction* ins, LDefinition* def)
{
    uint32_t vreg = def->virtualRegister();

    if ((def->output()->isRegister() && def->policy() == LDefinition::FIXED) ||
        def->policy() == LDefinition::MUST_REUSE_INPUT)
    {
        // Result will be in a specific register; spill any vreg held in
        // that register before the instruction.
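        // For MUST_REUSE_INPUT the target register is the one already chosen
        // for the reused input operand; for FIXED it is the register named by
        // the definition itself.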
        RegisterIndex index =
            registerIndex(def->policy() == LDefinition::FIXED
                          ? def->output()->toRegister()
                          : ins->getOperand(def->getReusedInput())->toRegister());
        evictRegister(ins, index);
        registers[index].set(vreg, ins, true);
        registers[index].type = virtualRegisters[vreg]->type();
        def->setOutput(LAllocation(registers[index].reg));
    } else if (def->policy() == LDefinition::FIXED) {
        // The result must be a stack location.
        def->setOutput(*stackLocation(vreg));
    } else {
        // Find a register to hold the result of the instruction.
        RegisterIndex best = allocateRegister(ins, vreg);
        registers[best].set(vreg, ins, true);
        registers[best].type = virtualRegisters[vreg]->type();
        def->setOutput(LAllocation(registers[best].reg));
    }
}