| | 1 | // Copyright 2012 the V8 project authors. All rights reserved. |
| | 2 | // Redistribution and use in source and binary forms, with or without |
| | 3 | // modification, are permitted provided that the following conditions are |
| | 4 | // met: |
| | 5 | // |
| | 6 | // * Redistributions of source code must retain the above copyright |
| | 7 | // notice, this list of conditions and the following disclaimer. |
| | 8 | // * Redistributions in binary form must reproduce the above |
| | 9 | // copyright notice, this list of conditions and the following |
| | 10 | // disclaimer in the documentation and/or other materials provided |
| | 11 | // with the distribution. |
| | 12 | // * Neither the name of Google Inc. nor the names of its |
| | 13 | // contributors may be used to endorse or promote products derived |
| | 14 | // from this software without specific prior written permission. |
| | 15 | // |
| | 16 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| | 17 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| | 18 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| | 19 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| | 20 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| | 21 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| | 22 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| | 23 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| | 24 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| | 25 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| | 26 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| | 27 | |
// Platform specific code for Haiku goes here. For the POSIX compatible parts
| | 29 | // the implementation is in platform-posix.cc. |
| | 30 | |
| | 31 | #include <pthread.h> |
| | 32 | #include <semaphore.h> |
| | 33 | #include <signal.h> |
| | 34 | #include <sys/time.h> |
| | 35 | #include <sys/resource.h> |
| | 36 | #include <sys/types.h> |
| | 37 | #include <stdlib.h> |
| | 38 | #include <sys/types.h> // mmap & munmap |
| | 39 | #include <sys/mman.h> // mmap & munmap |
| | 40 | #include <sys/stat.h> // open |
| | 41 | #include <fcntl.h> // open |
| | 42 | #include <unistd.h> // sysconf |
| | 43 | #include <strings.h> // index |
| | 44 | #include <errno.h> |
| | 45 | #include <stdarg.h> |
| | 46 | #include <kernel/OS.h> |
| | 47 | |
| | 48 | #undef MAP_TYPE |
| | 49 | |
| | 50 | #include "v8.h" |
| | 51 | |
| | 52 | #include "platform-posix.h" |
| | 53 | #include "platform.h" |
| | 54 | #include "v8threads.h" |
| | 55 | #include "vm-state-inl.h" |
| | 56 | |
| | 57 | |
| | 58 | namespace v8 { |
| | 59 | namespace internal { |
| | 60 | |
// pthread_t's are pointers to structs on Haiku, so an unstarted thread is
// represented by a NULL handle rather than an integral sentinel.
static const pthread_t kNoThread = NULL;
| | 63 | |
| | 64 | |
// Smallest integral value not less than x.
double ceiling(double x) {
  return ceil(x);  // Delegate to the C library.
}
| | 68 | |
| | 69 | |
// Guards the allocated-space limits below; created in OS::SetUp().
static Mutex* limit_mutex = NULL;
| | 71 | |
| | 72 | |
void OS::PostSetUp() {
  // Finish initialization shared by all POSIX platforms
  // (see platform-posix.cc).
  POSIXPostSetUp();
}


uint64_t OS::CpuFeaturesImpliedByPlatform() {
  return 0;  // Haiku runs on anything.
}


int OS::ActivationFrameAlignment() {
  // With gcc 4.4 the tree vectorization optimizer can generate code
  // that requires 16 byte alignment such as movdqa on x86.
  return 16;
}
| | 88 | |
| | 89 | |
// Store |value| to |ptr| with release semantics: earlier writes are made
// visible to other processors before the store itself.
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
    (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
  // Only use on ARM or MIPS hardware.
  MemoryBarrier();
#else
  // Compiler-only barrier: prevents reordering by the compiler.
  __asm__ __volatile__("" : : : "memory");
  // An x86 store acts as a release barrier.
#endif
  *ptr = value;
}
| | 101 | |
| | 102 | |
| | 103 | const char* OS::LocalTimezone(double time) { |
| | 104 | if (isnan(time)) return ""; |
| | 105 | time_t tv = static_cast<time_t>(floor(time/msPerSecond)); |
| | 106 | struct tm* t = localtime(&tv); |
| | 107 | if (NULL == t) return ""; |
| | 108 | return t->tm_zone; |
| | 109 | } |
| | 110 | |
| | 111 | |
| | 112 | double OS::LocalTimeOffset() { |
| | 113 | time_t tv = time(NULL); |
| | 114 | struct tm* t = localtime(&tv); |
| | 115 | // tm_gmtoff includes any daylight savings offset, so subtract it. |
| | 116 | return static_cast<double>(t->tm_gmtoff * msPerSecond - |
| | 117 | (t->tm_isdst > 0 ? 3600 * msPerSecond : 0)); |
| | 118 | } |
| | 119 | |
| | 120 | |
| | 121 | // We keep the lowest and highest addresses mapped as a quick way of |
| | 122 | // determining that pointers are outside the heap (used mostly in assertions |
| | 123 | // and verification). The estimate is conservative, i.e., not all addresses in |
| | 124 | // 'allocated' space are actually allocated to our heap. The range is |
// [lowest, highest), inclusive on the low end and exclusive on the high end.
// Conservative bounds of all memory this process has ever mapped for V8.
static void* lowest_ever_allocated = reinterpret_cast<void*>(-1);
static void* highest_ever_allocated = reinterpret_cast<void*>(0);
| | 128 | |
| | 129 | |
| | 130 | static void UpdateAllocatedSpaceLimits(void* address, int size) { |
| | 131 | ASSERT(limit_mutex != NULL); |
| | 132 | ScopedLock lock(limit_mutex); |
| | 133 | |
| | 134 | lowest_ever_allocated = Min(lowest_ever_allocated, address); |
| | 135 | highest_ever_allocated = |
| | 136 | Max(highest_ever_allocated, |
| | 137 | reinterpret_cast<void*>(reinterpret_cast<char*>(address) + size)); |
| | 138 | } |
| | 139 | |
| | 140 | |
bool OS::IsOutsideAllocatedSpace(void* address) {
  // Quick conservative containment check; see the comment on the
  // lowest/highest_ever_allocated statics above.
  return address < lowest_ever_allocated || address >= highest_ever_allocated;
}


size_t OS::AllocateAlignment() {
  // mmap operates in page-sized units.
  return sysconf(_SC_PAGESIZE);
}
| | 149 | |
| | 150 | |
// Allocate at least |requested| bytes of page-aligned memory, optionally
// executable. On success *allocated receives the rounded-up size; returns
// NULL on failure.
void* OS::Allocate(const size_t requested,
                   size_t* allocated,
                   bool is_executable) {
  const size_t msize = RoundUp(requested, AllocateAlignment());
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
  void* addr = OS::GetRandomMmapAddr();  // Hint only; kernel may relocate.
  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (mbase == MAP_FAILED) {
    LOG(i::Isolate::Current(),
        StringEvent("OS::Allocate", "mmap failed"));
    return NULL;
  }
  *allocated = msize;
  UpdateAllocatedSpaceLimits(mbase, msize);
  return mbase;
}
| | 167 | |
| | 168 | |
// Unmap a block previously returned by OS::Allocate (or a sub-range of a
// VirtualMemory reservation).
void OS::Free(void* address, const size_t size) {
  // TODO(1240712): munmap has a return value which is ignored here.
  int result = munmap(address, size);
  USE(result);
  ASSERT(result == 0);
}


void OS::Sleep(int milliseconds) {
  // usleep takes microseconds.
  unsigned int ms = static_cast<unsigned int>(milliseconds);
  usleep(1000 * ms);
}
| | 181 | |
| | 182 | |
void OS::Abort() {
  // Redirect to std abort to signal abnormal program termination.
  if (FLAG_break_on_abort) {
    DebugBreak();  // Give an attached debugger a chance to stop first.
  }
  abort();
}


// Emit a software breakpoint for the host architecture.
void OS::DebugBreak() {
  // TODO(lrn): Introduce processor define for runtime system (!= V8_ARCH_x,
  // which is the architecture of generated code).
#if (defined(__arm__) || defined(__thumb__))
# if defined(CAN_USE_ARMV5_INSTRUCTIONS)
  asm("bkpt 0");
# endif
#elif defined(__mips__)
  asm("break");
#else
  asm("int $3");  // x86 breakpoint instruction.
#endif
}
| | 205 | |
| | 206 | |
// Memory-mapped file backed by mmap over a stdio FILE; owns both the
// mapping and the FILE handle.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned; closed by the destructor.
  void* memory_;  // Owned; unmapped by the destructor.
  int size_;      // Size of the mapping in bytes.
};
| | 219 | |
| | 220 | |
| | 221 | OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { |
| | 222 | FILE* file = fopen(name, "r+"); |
| | 223 | if (file == NULL) return NULL; |
| | 224 | |
| | 225 | fseek(file, 0, SEEK_END); |
| | 226 | int size = ftell(file); |
| | 227 | |
| | 228 | void* memory = |
| | 229 | mmap(OS::GetRandomMmapAddr(), |
| | 230 | size, |
| | 231 | PROT_READ | PROT_WRITE, |
| | 232 | MAP_SHARED, |
| | 233 | fileno(file), |
| | 234 | 0); |
| | 235 | return new PosixMemoryMappedFile(file, memory, size); |
| | 236 | } |
| | 237 | |
| | 238 | |
| | 239 | OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, |
| | 240 | void* initial) { |
| | 241 | FILE* file = fopen(name, "w+"); |
| | 242 | if (file == NULL) return NULL; |
| | 243 | int result = fwrite(initial, size, 1, file); |
| | 244 | if (result < 1) { |
| | 245 | fclose(file); |
| | 246 | return NULL; |
| | 247 | } |
| | 248 | void* memory = |
| | 249 | mmap(OS::GetRandomMmapAddr(), |
| | 250 | size, |
| | 251 | PROT_READ | PROT_WRITE, |
| | 252 | MAP_SHARED, |
| | 253 | fileno(file), |
| | 254 | 0); |
| | 255 | return new PosixMemoryMappedFile(file, memory, size); |
| | 256 | } |
| | 257 | |
| | 258 | |
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  // NOTE(review): assumes memory_ is either NULL or a valid mapping; a
  // MAP_FAILED value (-1) would incorrectly pass this check — confirm that
  // the factory functions never construct with MAP_FAILED.
  if (memory_) OS::Free(memory_, size_);
  fclose(file_);
}
| | 263 | |
| | 264 | |
void OS::LogSharedLibraryAddresses() {
  // TODO
  // Loop through images (get_next_image_info()) and print data/text
  // addresses.
}


void OS::SignalCodeMovingGC() {
  // Intentionally empty: no code-moving-GC logging support on this port.
}


int OS::StackWalk(Vector<OS::StackFrame> frames) {
  // Not implemented on Haiku; could be built on dladdr.
  return 0;
}
| | 280 | |
// No MAP_NORESERVE on Haiku means we can't reserve without
// committing or decommit at all.
#define MAP_NORESERVE 0

// Constants used for mmap.
static const int kMmapFd = -1;       // Anonymous mappings use no file.
static const int kMmapFdOffset = 0;  // Offset is meaningless without a file.

// Empty, unreserved VirtualMemory.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }

// Reserve (but do not commit) |size| bytes of address space; on failure
// address_ is NULL and IsReserved() reports false.
VirtualMemory::VirtualMemory(size_t size) {
  address_ = ReserveRegion(size);
  size_ = size;
}
| | 295 | |
| | 296 | |
// Reserve |size| bytes aligned to |alignment| by over-reserving
// size + alignment bytes and unmapping the unaligned head and tail.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,  // Reserved only, not committed.
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;  // Object stays unreserved.

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block remains.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
| | 335 | |
| | 336 | |
VirtualMemory::~VirtualMemory() {
  // Release the entire reservation, committed or not.
  if (IsReserved()) {
    bool result = ReleaseRegion(address(), size());
    ASSERT(result);
    USE(result);
  }
}


bool VirtualMemory::IsReserved() {
  return address_ != NULL;
}


void VirtualMemory::Reset() {
  // Forget the region without unmapping it (ownership moves elsewhere).
  address_ = NULL;
  size_ = 0;
}


bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
  return CommitRegion(address, size, is_executable);
}


bool VirtualMemory::Uncommit(void* address, size_t size) {
  return UncommitRegion(address, size);
}


bool VirtualMemory::Guard(void* address) {
  // Make one page inaccessible to serve as a guard page.
  OS::Guard(address, OS::CommitPageSize());
  return true;
}
| | 371 | |
| | 372 | |
| | 373 | void* VirtualMemory::ReserveRegion(size_t size) { |
| | 374 | void* result = mmap(OS::GetRandomMmapAddr(), |
| | 375 | size, |
| | 376 | PROT_NONE, |
| | 377 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, |
| | 378 | kMmapFd, |
| | 379 | kMmapFdOffset); |
| | 380 | |
| | 381 | if (result == MAP_FAILED) return NULL; |
| | 382 | |
| | 383 | return result; |
| | 384 | } |
| | 385 | |
| | 386 | |
| | 387 | bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) { |
| | 388 | int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0); |
| | 389 | if (MAP_FAILED == mmap(base, |
| | 390 | size, |
| | 391 | prot, |
| | 392 | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, |
| | 393 | kMmapFd, |
| | 394 | kMmapFdOffset)) { |
| | 395 | return false; |
| | 396 | } |
| | 397 | |
| | 398 | UpdateAllocatedSpaceLimits(base, size); |
| | 399 | return true; |
| | 400 | } |
| | 401 | |
| | 402 | |
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
  // Remap as inaccessible, returning pages to the reserved-only state.
  return mmap(base,
              size,
              PROT_NONE,
              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
              kMmapFd,
              kMmapFdOffset) != MAP_FAILED;
}


bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
  // Fully unmap the region.
  return munmap(base, size) == 0;
}


bool VirtualMemory::HasLazyCommits() {
  // Committed pages are reported as lazily backed on this platform.
  return true;
}
| | 421 | |
| | 422 | |
// Per-thread platform state: just the pthread handle.
class Thread::PlatformData : public Malloced {
 public:
  PlatformData() : thread_(kNoThread) {}

  pthread_t thread_;  // Thread handle for pthread.
};

Thread::Thread(const Options& options)
    : data_(new PlatformData()),
      stack_size_(options.stack_size()) {
  set_name(options.name());
}


Thread::~Thread() {
  delete data_;
}


// Trampoline handed to pthread_create: records the pthread handle, then
// runs the Thread's Run() method.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // The handle is also written by pthread_create (see Thread::Start), but
  // either the creator or this new thread may run first, so set it here too.
  thread->data()->thread_ = pthread_self();
  ASSERT(thread->data()->thread_ != kNoThread);
  thread->Run();
  return NULL;
}
| | 449 | |
| | 450 | |
void Thread::set_name(const char* name) {
  // Copy and always NUL-terminate (strncpy does not terminate when the
  // source fills the buffer).
  strncpy(name_, name, sizeof(name_));
  name_[sizeof(name_) - 1] = '\0';
}


void Thread::Start() {
  pthread_attr_t* attr_ptr = NULL;
  pthread_attr_t attr;
  // Only pass an attribute object when a custom stack size was requested.
  if (stack_size_ > 0) {
    pthread_attr_init(&attr);
    pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
    attr_ptr = &attr;
  }
  int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
  CHECK_EQ(0, result);
  ASSERT(data_->thread_ != kNoThread);
}


void Thread::Join() {
  pthread_join(data_->thread_, NULL);
}
| | 474 | |
| | 475 | |
// Thread-local storage: thin wrappers over pthread keys.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
  pthread_key_t key;
  int result = pthread_key_create(&key, NULL);  // No destructor callback.
  USE(result);
  ASSERT(result == 0);
  return static_cast<LocalStorageKey>(key);
}


void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  int result = pthread_key_delete(pthread_key);
  USE(result);
  ASSERT(result == 0);
}


void* Thread::GetThreadLocal(LocalStorageKey key) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  return pthread_getspecific(pthread_key);
}


void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
  pthread_setspecific(pthread_key, value);
}


void Thread::YieldCPU() {
  sched_yield();
}
| | 508 | |
| | 509 | |
// Recursive mutex implemented on top of a pthread mutex.
class HaikuMutex : public Mutex {
 public:
  HaikuMutex() {
    pthread_mutexattr_t attrs;
    int result = pthread_mutexattr_init(&attrs);
    ASSERT(result == 0);
    // Recursive so the owning thread may lock it again without deadlock.
    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
    ASSERT(result == 0);
    result = pthread_mutex_init(&mutex_, &attrs);
    ASSERT(result == 0);
    USE(result);
  }

  virtual ~HaikuMutex() { pthread_mutex_destroy(&mutex_); }

  // Returns the pthread error code (0 on success).
  virtual int Lock() {
    int result = pthread_mutex_lock(&mutex_);
    return result;
  }

  virtual int Unlock() {
    int result = pthread_mutex_unlock(&mutex_);
    return result;
  }

  virtual bool TryLock() {
    int result = pthread_mutex_trylock(&mutex_);
    // Return false if the lock is busy and locking failed.
    if (result == EBUSY) {
      return false;
    }
    ASSERT(result == 0);  // Verify no other errors.
    return true;
  }

 private:
  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
};


Mutex* OS::CreateMutex() {
  return new HaikuMutex();
}
| | 553 | |
| | 554 | |
// Counting semaphore backed by a native Haiku sem_id.
class HaikuSemaphore : public Semaphore {
 public:
  explicit HaikuSemaphore(int count) { sem_ = create_sem(count, ""); }
  virtual ~HaikuSemaphore() { delete_sem(sem_); }

  virtual void Wait();
  virtual bool Wait(int timeout);
  virtual void Signal() { release_sem(sem_); }
 private:
  sem_id sem_;  // Native semaphore handle.
};


// Block until the semaphore is acquired, retrying across signal
// interruptions.
void HaikuSemaphore::Wait() {
  while (true) {
    int result = acquire_sem(sem_);
    if (result == B_NO_ERROR) return;  // Successfully got semaphore.
    CHECK(result == B_INTERRUPTED);  // Signal caused spurious wakeup.
  }
}
| | 575 | |
| | 576 | |
// Fallback definition of the BSD TIMEVAL_TO_TIMESPEC helper (converts a
// struct timeval to a struct timespec). Kept for parity with the other
// POSIX ports; not referenced elsewhere in this file.
#ifndef TIMEVAL_TO_TIMESPEC
#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
  (ts)->tv_sec = (tv)->tv_sec; \
  (ts)->tv_nsec = (tv)->tv_usec * 1000; \
} while (false)
#endif
| | 583 | |
| | 584 | |
| | 585 | bool HaikuSemaphore::Wait(int timeout) { |
| | 586 | // Wait for semaphore signalled or timeout. |
| | 587 | while (true) { |
| | 588 | int result = acquire_sem_etc(sem_, 1, B_RELATIVE_TIMEOUT, timeout); |
| | 589 | if (result == B_NO_ERROR) return true; // Successfully got semaphore. |
| | 590 | if (result == B_TIMED_OUT || result == B_WOULD_BLOCK) return false; // Timeout. |
| | 591 | CHECK(result == B_INTERRUPTED); // Signal caused spurious wakeup. |
| | 592 | } |
| | 593 | } |
| | 594 | |
| | 595 | |
Semaphore* OS::CreateSemaphore(int count) {
  return new HaikuSemaphore(count);
}


// Identifies the calling thread so the profiler can later direct SIGPROF
// at it with pthread_kill.
static pthread_t GetThreadID() {
  return pthread_self();
}
| | 604 | |
| | 605 | |
// SIGPROF handler: records one tick sample (pc/sp/fp plus VM state) for
// the CPU profiler. Runs in signal context on the sampled thread.
static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;

  Isolate* isolate = Isolate::UncheckedCurrent();
  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
    // We require a fully initialized and entered isolate.
    return;
  }
  if (v8::Locker::IsActive() &&
      !isolate->thread_manager()->IsLockedByCurrentThread()) {
    return;
  }

  Sampler* sampler = isolate->logger()->sampler();
  if (sampler == NULL || !sampler->IsActive()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::StartTickSampleEvent(isolate);
  if (sample == NULL) sample = &sample_obj;  // Fall back to a local sample.

  // Extracting the sample from the context is extremely machine dependent.
  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = isolate->current_vm_state();
#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.eip);
  sample->sp = reinterpret_cast<Address>(mcontext.esp);
  sample->fp = reinterpret_cast<Address>(mcontext.ebp);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.rip);
  sample->sp = reinterpret_cast<Address>(mcontext.rsp);
  sample->fp = reinterpret_cast<Address>(mcontext.rbp);
#elif V8_HOST_ARCH_ARM
  sample->pc = reinterpret_cast<Address>(mcontext.r15);
  sample->sp = reinterpret_cast<Address>(mcontext.r13);
  sample->fp = reinterpret_cast<Address>(mcontext.r11);
#elif V8_HOST_ARCH_MIPS
#error TODO
#endif  // V8_HOST_ARCH_*
  sampler->SampleStack(sample);
  sampler->Tick(sample);
  CpuProfiler::FinishTickSampleEvent(isolate);
}
| | 650 | |
| | 651 | |
| | 652 | class CpuProfilerSignalHandler { |
| | 653 | public: |
| | 654 | static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); } |
| | 655 | static void TearDown() { delete mutex_; } |
| | 656 | |
| | 657 | static void InstallSignalHandler() { |
| | 658 | struct sigaction sa; |
| | 659 | ScopedLock lock(mutex_); |
| | 660 | if (signal_handler_installed_counter_ > 0) { |
| | 661 | signal_handler_installed_counter_++; |
| | 662 | return; |
| | 663 | } |
| | 664 | sa.sa_sigaction = ProfilerSignalHandler; |
| | 665 | sigemptyset(&sa.sa_mask); |
| | 666 | sa.sa_flags = SA_RESTART | SA_SIGINFO; |
| | 667 | if (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0) { |
| | 668 | signal_handler_installed_counter_++; |
| | 669 | } |
| | 670 | } |
| | 671 | |
| | 672 | static void RestoreSignalHandler() { |
| | 673 | ScopedLock lock(mutex_); |
| | 674 | if (signal_handler_installed_counter_ == 0) |
| | 675 | return; |
| | 676 | if (signal_handler_installed_counter_ == 1) { |
| | 677 | sigaction(SIGPROF, &old_signal_handler_, 0); |
| | 678 | } |
| | 679 | signal_handler_installed_counter_--; |
| | 680 | } |
| | 681 | |
| | 682 | static bool signal_handler_installed() { |
| | 683 | return signal_handler_installed_counter_ > 0; |
| | 684 | } |
| | 685 | |
| | 686 | private: |
| | 687 | static int signal_handler_installed_counter_; |
| | 688 | static struct sigaction old_signal_handler_; |
| | 689 | static Mutex* mutex_; |
| | 690 | }; |
| | 691 | |
| | 692 | |
| | 693 | int CpuProfilerSignalHandler::signal_handler_installed_counter_ = 0; |
| | 694 | struct sigaction CpuProfilerSignalHandler::old_signal_handler_; |
| | 695 | Mutex* CpuProfilerSignalHandler::mutex_ = NULL; |
| | 696 | |
| | 697 | |
// Per-sampler platform state: remembers the sampled (VM) thread so the
// signal-sender thread can direct SIGPROF at it.
class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() : vm_tid_(GetThreadID()) {}

  void SendProfilingSignal() {
    // Don't raise SIGPROF unless a handler is installed (the default
    // disposition would terminate the process).
    if (!CpuProfilerSignalHandler::signal_handler_installed()) return;
    pthread_kill(vm_tid_, SIGPROF);
  }

 private:
  const pthread_t vm_tid_;  // Thread on which the sampler was created.
};
| | 710 | |
| | 711 | |
| | 712 | class SignalSender : public Thread { |
| | 713 | public: |
| | 714 | enum SleepInterval { |
| | 715 | HALF_INTERVAL, |
| | 716 | FULL_INTERVAL |
| | 717 | }; |
| | 718 | |
| | 719 | static const int kSignalSenderStackSize = 64 * KB; |
| | 720 | |
| | 721 | explicit SignalSender(int interval) |
| | 722 | : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), |
| | 723 | interval_(interval) {} |
| | 724 | |
| | 725 | static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); } |
| | 726 | static void TearDown() { delete mutex_; } |
| | 727 | |
| | 728 | static void AddActiveSampler(Sampler* sampler) { |
| | 729 | ScopedLock lock(mutex_); |
| | 730 | SamplerRegistry::AddActiveSampler(sampler); |
| | 731 | if (instance_ == NULL) { |
| | 732 | // Start a thread that will send SIGPROF signal to VM threads, |
| | 733 | // when CPU profiling will be enabled. |
| | 734 | instance_ = new SignalSender(sampler->interval()); |
| | 735 | instance_->Start(); |
| | 736 | } else { |
| | 737 | ASSERT(instance_->interval_ == sampler->interval()); |
| | 738 | } |
| | 739 | } |
| | 740 | |
| | 741 | static void RemoveActiveSampler(Sampler* sampler) { |
| | 742 | ScopedLock lock(mutex_); |
| | 743 | SamplerRegistry::RemoveActiveSampler(sampler); |
| | 744 | if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { |
| | 745 | RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_); |
| | 746 | delete instance_; |
| | 747 | instance_ = NULL; |
| | 748 | } |
| | 749 | } |
| | 750 | |
| | 751 | // Implement Thread::Run(). |
| | 752 | virtual void Run() { |
| | 753 | SamplerRegistry::State state; |
| | 754 | while ((state = SamplerRegistry::GetState()) != |
| | 755 | SamplerRegistry::HAS_NO_SAMPLERS) { |
| | 756 | bool cpu_profiling_enabled = |
| | 757 | (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); |
| | 758 | bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); |
| | 759 | // When CPU profiling is enabled both JavaScript and C++ code is |
| | 760 | // profiled. We must not suspend. |
| | 761 | if (!cpu_profiling_enabled) { |
| | 762 | if (rate_limiter_.SuspendIfNecessary()) continue; |
| | 763 | } |
| | 764 | if (cpu_profiling_enabled && runtime_profiler_enabled) { |
| | 765 | if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) { |
| | 766 | return; |
| | 767 | } |
| | 768 | Sleep(HALF_INTERVAL); |
| | 769 | if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { |
| | 770 | return; |
| | 771 | } |
| | 772 | Sleep(HALF_INTERVAL); |
| | 773 | } else { |
| | 774 | if (cpu_profiling_enabled) { |
| | 775 | if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, NULL)) { |
| | 776 | return; |
| | 777 | } |
| | 778 | } |
| | 779 | if (runtime_profiler_enabled) { |
| | 780 | if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, |
| | 781 | NULL)) { |
| | 782 | return; |
| | 783 | } |
| | 784 | } |
| | 785 | Sleep(FULL_INTERVAL); |
| | 786 | } |
| | 787 | } |
| | 788 | } |
| | 789 | |
| | 790 | static void DoCpuProfile(Sampler* sampler, void*) { |
| | 791 | if (!sampler->IsProfiling()) return; |
| | 792 | sampler->platform_data()->SendProfilingSignal(); |
| | 793 | } |
| | 794 | |
| | 795 | static void DoRuntimeProfile(Sampler* sampler, void* ignored) { |
| | 796 | if (!sampler->isolate()->IsInitialized()) return; |
| | 797 | sampler->isolate()->runtime_profiler()->NotifyTick(); |
| | 798 | } |
| | 799 | |
| | 800 | void Sleep(SleepInterval full_or_half) { |
| | 801 | // Convert ms to us and subtract 100 us to compensate delays |
| | 802 | // occuring during signal delivery. |
| | 803 | useconds_t interval = interval_ * 1000 - 100; |
| | 804 | if (full_or_half == HALF_INTERVAL) interval /= 2; |
| | 805 | int result = usleep(interval); |
| | 806 | #ifdef DEBUG |
| | 807 | if (result != 0 && errno != EINTR) { |
| | 808 | fprintf(stderr, |
| | 809 | "SignalSender usleep error; interval = %lu, errno = %d\n", |
| | 810 | interval, |
| | 811 | errno); |
| | 812 | ASSERT(result == 0 || errno == EINTR); |
| | 813 | } |
| | 814 | #endif |
| | 815 | USE(result); |
| | 816 | } |
| | 817 | |
| | 818 | const int interval_; |
| | 819 | RuntimeProfilerRateLimiter rate_limiter_; |
| | 820 | |
| | 821 | // Protects the process wide state below. |
| | 822 | static Mutex* mutex_; |
| | 823 | static SignalSender* instance_; |
| | 824 | |
| | 825 | private: |
| | 826 | DISALLOW_COPY_AND_ASSIGN(SignalSender); |
| | 827 | }; |
| | 828 | |
| | 829 | |
| | 830 | Mutex* SignalSender::mutex_ = NULL; |
| | 831 | SignalSender* SignalSender::instance_ = NULL; |
| | 832 | |
| | 833 | |
void OS::SetUp() {
  // Seed the random number generator. We preserve microsecond resolution.
  uint64_t seed = Ticks() ^ (getpid() << 16);
  srandom(static_cast<unsigned int>(seed));
  limit_mutex = CreateMutex();

  SignalSender::SetUp();
  CpuProfilerSignalHandler::SetUp();
}


void OS::TearDown() {
  // Release process-wide resources created in SetUp().
  SignalSender::TearDown();
  CpuProfilerSignalHandler::TearDown();
  delete limit_mutex;
}
| | 850 | |
| | 851 | |
// Sampler: drives periodic tick sampling of one isolate.
Sampler::Sampler(Isolate* isolate, int interval)
    : isolate_(isolate),
      interval_(interval),
      profiling_(false),
      active_(false),
      has_processing_thread_(false),
      samples_taken_(0) {
  data_ = new PlatformData;
}


Sampler::~Sampler() {
  ASSERT(!IsActive());  // Stop() must have been called first.
  delete data_;
}


void Sampler::DoSample() {
  // Trigger one SIGPROF-based sample on the sampled thread.
  platform_data()->SendProfilingSignal();
}


void Sampler::Start() {
  ASSERT(!IsActive());
  SetActive(true);
  SignalSender::AddActiveSampler(this);
}


void Sampler::Stop() {
  ASSERT(IsActive());
  SignalSender::RemoveActiveSampler(this);
  SetActive(false);
}


void Sampler::StartSampling() {
  CpuProfilerSignalHandler::InstallSignalHandler();
}


void Sampler::StopSampling() {
  CpuProfilerSignalHandler::RestoreSignalHandler();
}
| | 896 | |
| | 897 | |
| | 898 | } } // namespace v8::internal |