/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __STDC_FORMAT_MACROS
  #define __STDC_FORMAT_MACROS
#endif

#include <folly/io/async/AsyncServerSocket.h>

#include <folly/FileUtil.h>
#include <folly/Portability.h>
#include <folly/SocketAddress.h>
#include <folly/String.h>
#include <folly/detail/SocketFastOpen.h>
#include <folly/io/async/EventBase.h>
#include <folly/io/async/NotificationQueue.h>
#include <folly/portability/Fcntl.h>
#include <folly/portability/Sockets.h>
#include <folly/portability/Unistd.h>

#include <errno.h>
#include <string.h>
#include <sys/types.h>

namespace fsp = folly::portability::sockets;

namespace folly {

const uint32_t AsyncServerSocket::kDefaultMaxAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultCallbackAcceptAtOnce;
const uint32_t AsyncServerSocket::kDefaultMaxMessagesInQueue;

int setCloseOnExec(int fd, int value) {
  // Read the current flags
  int old_flags = fcntl(fd, F_GETFD, 0);

  // If reading the flags failed, return error indication now
  if (old_flags < 0)
    return -1;

  // Set just the flag we want to set
  int new_flags;
  if (value != 0)
    new_flags = old_flags | FD_CLOEXEC;
  else
    new_flags = old_flags & ~FD_CLOEXEC;

  // Store modified flag word in the descriptor
  return fcntl(fd, F_SETFD, new_flags);
}

void AsyncServerSocket::RemoteAcceptor::start(
  EventBase* eventBase, uint32_t maxAtOnce, uint32_t maxInQueue) {
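  // Descriptive note: the RemoteAcceptor consumes queue_ in the callback's
  // EventBase thread.  dispatchSocket() and dispatchError(), running in the
  // primary EventBase thread, push messages onto this queue; maxAtOnce bounds
  // how many queued messages are handed to the callback per consumption pass,
  // and maxInQueue bounds how many messages may be pending at once.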
  setMaxReadAtOnce(maxAtOnce);
  queue_.setMaxQueueSize(maxInQueue);

  if (!eventBase->runInEventBaseThread([=](){
        callback_->acceptStarted();
        this->startConsuming(eventBase, &queue_);
      })) {
    throw std::invalid_argument("unable to start waiting on accept "
                            "notification queue in the specified "
                            "EventBase thread");
  }
}

void AsyncServerSocket::RemoteAcceptor::stop(
  EventBase* eventBase, AcceptCallback* callback) {
  if (!eventBase->runInEventBaseThread([=](){
        callback->acceptStopped();
        delete this;
      })) {
    throw std::invalid_argument("unable to start waiting on accept "
                            "notification queue in the specified "
                            "EventBase thread");
  }
}

void AsyncServerSocket::RemoteAcceptor::messageAvailable(
  QueueMessage&& msg) {

  switch (msg.type) {
    case MessageType::MSG_NEW_CONN:
    {
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionDequeuedByAcceptorCallback(
            msg.fd, msg.address);
      }
      callback_->connectionAccepted(msg.fd, msg.address);
      break;
    }
    case MessageType::MSG_ERROR:
    {
      std::runtime_error ex(msg.msg);
      callback_->acceptError(ex);
      break;
    }
    default:
    {
      LOG(ERROR) << "invalid accept notification message type "
                 << int(msg.type);
      std::runtime_error ex(
        "received invalid accept notification message type");
      callback_->acceptError(ex);
    }
  }
}

/*
 * AsyncServerSocket::BackoffTimeout
 */
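// One-shot timer used by enterBackoff(): when accepting is paused (e.g.
// because we ran out of file descriptors), its expiry calls
// backoffTimeoutExpired() to re-register the accept handlers.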
class AsyncServerSocket::BackoffTimeout : public AsyncTimeout {
 public:
  // Disallow copy, move, and default constructors.
  BackoffTimeout(BackoffTimeout&&) = delete;
  explicit BackoffTimeout(AsyncServerSocket* socket)
      : AsyncTimeout(socket->getEventBase()), socket_(socket) {}

  void timeoutExpired() noexcept override { socket_->backoffTimeoutExpired(); }

 private:
  AsyncServerSocket* socket_;
};

/*
 * AsyncServerSocket methods
 */

AsyncServerSocket::AsyncServerSocket(EventBase* eventBase)
:   eventBase_(eventBase),
    accepting_(false),
    maxAcceptAtOnce_(kDefaultMaxAcceptAtOnce),
    maxNumMsgsInQueue_(kDefaultMaxMessagesInQueue),
    acceptRateAdjustSpeed_(0),
    acceptRate_(1),
    lastAccepTimestamp_(std::chrono::steady_clock::now()),
    numDroppedConnections_(0),
    callbackIndex_(0),
    backoffTimeout_(nullptr),
    callbacks_(),
    keepAliveEnabled_(true),
    closeOnExec_(true),
    shutdownSocketSet_(nullptr) {
}

void AsyncServerSocket::setShutdownSocketSet(ShutdownSocketSet* newSS) {
  if (shutdownSocketSet_ == newSS) {
    return;
  }
  if (shutdownSocketSet_) {
    for (auto& h : sockets_) {
      shutdownSocketSet_->remove(h.socket_);
    }
  }
  shutdownSocketSet_ = newSS;
  if (shutdownSocketSet_) {
    for (auto& h : sockets_) {
      shutdownSocketSet_->add(h.socket_);
    }
  }
}

AsyncServerSocket::~AsyncServerSocket() {
  assert(callbacks_.empty());
}

int AsyncServerSocket::stopAccepting(int shutdownFlags) {
  int result = 0;
  for (auto& handler : sockets_) {
    VLOG(10) << "AsyncServerSocket::stopAccepting " << this <<
              handler.socket_;
  }
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // When destroy is called, unregister and close the socket immediately.
  accepting_ = false;

  // Close the sockets in reverse order as they were opened to avoid
  // the condition where another process concurrently tries to open
  // the same port, succeed to bind the first socket but fails on the
  // second because it hasn't been closed yet.
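  //
  // A non-negative shutdownFlags value means each socket is shutdown() with
  // those flags here and only close()d later in destroy(); a negative value
  // closes each socket immediately.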
  for (; !sockets_.empty(); sockets_.pop_back()) {
    auto& handler = sockets_.back();
    handler.unregisterHandler();
    if (shutdownSocketSet_) {
      shutdownSocketSet_->close(handler.socket_);
    } else if (shutdownFlags >= 0) {
      result = shutdownNoInt(handler.socket_, shutdownFlags);
      pendingCloseSockets_.push_back(handler.socket_);
    } else {
      closeNoInt(handler.socket_);
    }
  }

  // Destroy the backoff timeout.  This will cancel it if it is running.
  delete backoffTimeout_;
  backoffTimeout_ = nullptr;

  // Close all of the callback queues to notify them that they are being
  // destroyed.  No one should access the AsyncServerSocket any more once
  // destroy() is called.  However, clear out callbacks_ before invoking the
  // accept callbacks just in case.  This will potentially help us detect the
  // bug if one of the callbacks calls addAcceptCallback() or
  // removeAcceptCallback().
  std::vector<CallbackInfo> callbacksCopy;
  callbacks_.swap(callbacksCopy);
  for (std::vector<CallbackInfo>::iterator it = callbacksCopy.begin();
       it != callbacksCopy.end();
       ++it) {
    // consumer may not be set if we are running in primary event base
    if (it->consumer) {
      DCHECK(it->eventBase);
      it->consumer->stop(it->eventBase, it->callback);
    } else {
      DCHECK(it->callback);
      it->callback->acceptStopped();
    }
  }

  return result;
}

void AsyncServerSocket::destroy() {
  stopAccepting();
  for (auto s : pendingCloseSockets_) {
    closeNoInt(s);
  }
  // Then call DelayedDestruction::destroy() to take care of
  // whether or not we need immediate or delayed destruction
  DelayedDestruction::destroy();
}

void AsyncServerSocket::attachEventBase(EventBase *eventBase) {
  assert(eventBase_ == nullptr);
  assert(eventBase->isInEventBaseThread());

  eventBase_ = eventBase;
  for (auto& handler : sockets_) {
    handler.attachEventBase(eventBase);
  }
}

void AsyncServerSocket::detachEventBase() {
  assert(eventBase_ != nullptr);
  assert(eventBase_->isInEventBaseThread());
  assert(!accepting_);

  eventBase_ = nullptr;
  for (auto& handler : sockets_) {
    handler.detachEventBase();
  }
}

void AsyncServerSocket::useExistingSockets(const std::vector<int>& fds) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  if (sockets_.size() > 0) {
    throw std::invalid_argument(
                              "cannot call useExistingSocket() on a "
                              "AsyncServerSocket that already has a socket");
  }

  for (auto fd: fds) {
    // Set addressFamily_ from this socket.
    // Note that the socket may not have been bound yet, but
    // setFromLocalAddress() will still work and get the correct address family.
    // We will update addressFamily_ again anyway if bind() is called later.
    SocketAddress address;
    address.setFromLocalAddress(fd);

    setupSocket(fd, address.getFamily());
    sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
    sockets_.back().changeHandlerFD(fd);
  }
}

void AsyncServerSocket::useExistingSocket(int fd) {
  useExistingSockets({fd});
}

void AsyncServerSocket::bindSocket(
    int fd,
    const SocketAddress& address,
    bool isExistingSocket) {
  sockaddr_storage addrStorage;
  address.getAddress(&addrStorage);
  sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);
  if (fsp::bind(fd, saddr, address.getActualSize()) != 0) {
    if (!isExistingSocket) {
      closeNoInt(fd);
    }
    folly::throwSystemError(errno,
        "failed to bind to async server socket: " +
        address.describe());
  }

  // If we just created this socket, update the EventHandler and set socket_
  if (!isExistingSocket) {
    sockets_.emplace_back(eventBase_, fd, this, address.getFamily());
  }
}

void AsyncServerSocket::bind(const SocketAddress& address) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // useExistingSocket() may have been called to initialize socket_ already.
  // However, in the normal case we need to create a new socket now.
  // Don't set socket_ yet, so that socket_ will remain uninitialized if an
  // error occurs.
  int fd;
  if (sockets_.size() == 0) {
    fd = createSocket(address.getFamily());
  } else if (sockets_.size() == 1) {
    if (address.getFamily() != sockets_[0].addressFamily_) {
      throw std::invalid_argument(
                                "Attempted to bind address to socket with "
                                "different address family");
    }
    fd = sockets_[0].socket_;
  } else {
    throw std::invalid_argument(
                              "Attempted to bind to multiple fds");
  }

  bindSocket(fd, address, !sockets_.empty());
}

void AsyncServerSocket::bind(
    const std::vector<IPAddress>& ipAddresses,
    uint16_t port) {
  if (ipAddresses.empty()) {
    throw std::invalid_argument("No ip addresses were provided");
  }
  if (!sockets_.empty()) {
    throw std::invalid_argument("Cannot call bind on a AsyncServerSocket "
                                "that already has a socket.");
  }

  for (const IPAddress& ipAddress : ipAddresses) {
    SocketAddress address(ipAddress.toFullyQualified(), port);
    int fd = createSocket(address.getFamily());

    bindSocket(fd, address, false);
  }
  if (sockets_.size() == 0) {
    throw std::runtime_error(
        "did not bind any async server socket for port and addresses");
  }
}

void AsyncServerSocket::bind(uint16_t port) {
  struct addrinfo hints, *res0;
  char sport[sizeof("65536")];

  memset(&hints, 0, sizeof(hints));
  hints.ai_family = AF_UNSPEC;
  hints.ai_socktype = SOCK_STREAM;
  hints.ai_flags = AI_PASSIVE | AI_NUMERICSERV;
  snprintf(sport, sizeof(sport), "%u", port);

  // On Windows the value we need to pass to bind to all available
  // addresses is an empty string. Everywhere else, it's nullptr.
  constexpr const char* kWildcardNode = kIsWindows ? "" : nullptr;
  if (getaddrinfo(kWildcardNode, sport, &hints, &res0)) {
    throw std::invalid_argument(
                              "Attempted to bind address to socket with "
                              "bad getaddrinfo");
  }

  SCOPE_EXIT { freeaddrinfo(res0); };

  auto setupAddress = [&] (struct addrinfo* res) {
    int s = fsp::socket(res->ai_family, res->ai_socktype, res->ai_protocol);
    // IPv6/IPv4 may not be supported by the kernel
    if (s < 0 && errno == EAFNOSUPPORT) {
      return;
    }
    CHECK_GE(s, 0);

    try {
      setupSocket(s, res->ai_family);
    } catch (...) {
      closeNoInt(s);
      throw;
    }

    if (res->ai_family == AF_INET6) {
      int v6only = 1;
      CHECK(0 == setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY,
                            &v6only, sizeof(v6only)));
    }

    // Bind to the socket
    if (fsp::bind(s, res->ai_addr, socklen_t(res->ai_addrlen)) != 0) {
      folly::throwSystemError(
          errno,
          "failed to bind to async server socket for port ",
          SocketAddress::getPortFrom(res->ai_addr),
          " family ",
          SocketAddress::getFamilyNameFrom(res->ai_addr, "<unknown>"));
    }

    SocketAddress address;
    address.setFromLocalAddress(s);

    sockets_.emplace_back(eventBase_, s, this, address.getFamily());
  };

  const int kNumTries = 25;
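  // When port == 0, binding the IPv6 wildcard picks an ephemeral port and we
  // then try to bind IPv4 to that same port.  Another process may grab the
  // IPv4 port in between, so on failure we close everything that was opened
  // and retry the whole sequence, up to kNumTries times.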
  for (int tries = 1; true; tries++) {
    // Prefer AF_INET6 addresses.  RFC 3484 mandates that getaddrinfo
    // should return IPv6 first and then IPv4 addresses, but glibc's
    // getaddrinfo(nullptr) with AI_PASSIVE returns, in this order:
    // - 0.0.0.0 (IPv4-only)
    // - :: (IPv6+IPv4)
    // See: https://sourceware.org/bugzilla/show_bug.cgi?id=9981
    for (struct addrinfo* res = res0; res; res = res->ai_next) {
      if (res->ai_family == AF_INET6) {
        setupAddress(res);
      }
    }

    // If port == 0, then we should try to bind to the same port on ipv4 and
    // ipv6.  So if we did bind to ipv6, figure out that port and use it.
    if (sockets_.size() == 1 && port == 0) {
      SocketAddress address;
      address.setFromLocalAddress(sockets_.back().socket_);
      snprintf(sport, sizeof(sport), "%u", address.getPort());
      freeaddrinfo(res0);
      CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
    }

    try {
      for (struct addrinfo* res = res0; res; res = res->ai_next) {
        if (res->ai_family != AF_INET6) {
          setupAddress(res);
        }
      }
    } catch (const std::system_error&) {
      // If we can't bind to the same port on ipv4 as ipv6 when using
      // port=0 then we will retry again before giving up after
      // kNumTries attempts.  We do this by closing the sockets that
      // were opened, then restarting from scratch.
      if (port == 0 && !sockets_.empty() && tries != kNumTries) {
        for (const auto& socket : sockets_) {
          if (socket.socket_ <= 0) {
            continue;
          } else if (shutdownSocketSet_) {
            shutdownSocketSet_->close(socket.socket_);
          } else {
            closeNoInt(socket.socket_);
          }
        }
        sockets_.clear();
        snprintf(sport, sizeof(sport), "%u", port);
        freeaddrinfo(res0);
        CHECK_EQ(0, getaddrinfo(nullptr, sport, &hints, &res0));
        continue;
      }

      throw;
    }

    break;
  }

  if (sockets_.size() == 0) {
    throw std::runtime_error(
        "did not bind any async server socket for port");
  }
}

void AsyncServerSocket::listen(int backlog) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // Start listening
  for (auto& handler : sockets_) {
    if (fsp::listen(handler.socket_, backlog) == -1) {
      folly::throwSystemError(errno,
                                    "failed to listen on async server socket");
    }
  }
}

void AsyncServerSocket::getAddress(SocketAddress* addressReturn) const {
  CHECK(sockets_.size() >= 1);
  VLOG_IF(2, sockets_.size() > 1)
    << "Warning: getAddress() called and multiple addresses available ("
    << sockets_.size() << "). Returning only the first one.";

  addressReturn->setFromLocalAddress(sockets_[0].socket_);
}

std::vector<SocketAddress> AsyncServerSocket::getAddresses()
    const {
  CHECK(sockets_.size() >= 1);
  auto tsaVec = std::vector<SocketAddress>(sockets_.size());
  auto tsaIter = tsaVec.begin();
  for (const auto& socket : sockets_) {
    (tsaIter++)->setFromLocalAddress(socket.socket_);
  }
  return tsaVec;
}

void AsyncServerSocket::addAcceptCallback(AcceptCallback *callback,
                                           EventBase *eventBase,
                                           uint32_t maxAtOnce) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // If this is the first accept callback and we are supposed to be accepting,
  // start accepting once the callback is installed.
  bool runStartAccepting = accepting_ && callbacks_.empty();

  callbacks_.emplace_back(callback, eventBase);

  SCOPE_SUCCESS {
    // If this is the first accept callback and we are supposed to be accepting,
    // start accepting.
    if (runStartAccepting) {
      startAccepting();
    }
  };

  if (!eventBase) {
    // Run in AsyncServerSocket's eventbase; notify that we are
    // starting to accept connections
    callback->acceptStarted();
    return;
  }

  // Start the remote acceptor.
  //
  // It would be nice if we could avoid starting the remote acceptor if
  // eventBase == eventBase_.  However, that would cause issues if
  // detachEventBase() and attachEventBase() were ever used to change the
  // primary EventBase for the server socket.  Therefore we require the caller
  // to specify a nullptr EventBase if they want to ensure that the callback is
  // always invoked in the primary EventBase, and to be able to invoke that
  // callback more efficiently without having to use a notification queue.
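  //
  // Illustrative caller-side sketch (not part of this file; mainEvb,
  // workerEvb, myAcceptCb and inlineCb are placeholder names):
  //
  //   auto sock = AsyncServerSocket::newSocket(&mainEvb);
  //   sock->bind(8080);
  //   sock->listen(1024);
  //   sock->addAcceptCallback(&myAcceptCb, &workerEvb); // via RemoteAcceptor
  //   sock->addAcceptCallback(&inlineCb, nullptr);      // runs in mainEvb
  //   sock->startAccepting();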
  RemoteAcceptor* acceptor = nullptr;
  try {
    acceptor = new RemoteAcceptor(callback, connectionEventCallback_);
    acceptor->start(eventBase, maxAtOnce, maxNumMsgsInQueue_);
  } catch (...) {
    callbacks_.pop_back();
    delete acceptor;
    throw;
  }
  callbacks_.back().consumer = acceptor;
}

void AsyncServerSocket::removeAcceptCallback(AcceptCallback *callback,
                                              EventBase *eventBase) {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  // Find the matching AcceptCallback.
  // We just do a simple linear search; we don't expect removeAcceptCallback()
  // to be called frequently, and we expect there to only be a small number of
  // callbacks anyway.
  std::vector<CallbackInfo>::iterator it = callbacks_.begin();
  uint32_t n = 0;
  while (true) {
    if (it == callbacks_.end()) {
      throw std::runtime_error("AsyncServerSocket::removeAcceptCallback(): "
                              "accept callback not found");
    }
    if (it->callback == callback &&
        (it->eventBase == eventBase || eventBase == nullptr)) {
      break;
    }
    ++it;
    ++n;
  }

  // Remove this callback from callbacks_.
  //
  // Do this before invoking the acceptStopped() callback, in case
  // acceptStopped() invokes one of our methods that examines callbacks_.
  //
  // Save a copy of the CallbackInfo first.
  CallbackInfo info(*it);
  callbacks_.erase(it);
  if (n < callbackIndex_) {
    // We removed an element before callbackIndex_.  Move callbackIndex_ back
    // one step, since things after n have been shifted back by 1.
    --callbackIndex_;
  } else {
    // We removed something at or after callbackIndex_.
    // If we removed the last element and callbackIndex_ was pointing at it,
    // we need to reset callbackIndex_ to 0.
    if (callbackIndex_ >= callbacks_.size()) {
      callbackIndex_ = 0;
    }
  }

  if (info.consumer) {
    // consumer may be nullptr if we run callbacks in the primary event
    // base
    DCHECK(info.eventBase);
    info.consumer->stop(info.eventBase, info.callback);
  } else {
    // callback invoked in the primary event base, just call directly
    DCHECK(info.callback);
    callback->acceptStopped();
  }

  // If we are supposed to be accepting but the last accept callback
  // was removed, unregister for events until a callback is added.
  if (accepting_ && callbacks_.empty()) {
    for (auto& handler : sockets_) {
      handler.unregisterHandler();
    }
  }
}

void AsyncServerSocket::startAccepting() {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());

  accepting_ = true;
  if (callbacks_.empty()) {
    // We can't actually begin accepting if no callbacks are defined.
    // Wait until a callback is added to start accepting.
    return;
  }

  for (auto& handler : sockets_) {
    if (!handler.registerHandler(
          EventHandler::READ | EventHandler::PERSIST)) {
      throw std::runtime_error("failed to register for accept events");
    }
  }
}

void AsyncServerSocket::pauseAccepting() {
  assert(eventBase_ == nullptr || eventBase_->isInEventBaseThread());
  accepting_ = false;
  for (auto& handler : sockets_) {
    handler.unregisterHandler();
  }

  // If we were in the accept backoff state, disable the backoff timeout
  if (backoffTimeout_) {
    backoffTimeout_->cancelTimeout();
  }
}

int AsyncServerSocket::createSocket(int family) {
  int fd = fsp::socket(family, SOCK_STREAM, 0);
  if (fd == -1) {
    folly::throwSystemError(errno, "error creating async server socket");
  }

  try {
    setupSocket(fd, family);
  } catch (...) {
    closeNoInt(fd);
    throw;
  }
  return fd;
}

void AsyncServerSocket::setupSocket(int fd, int family) {
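  // Apply the standard options for a listening socket: non-blocking mode,
  // SO_REUSEADDR, optional SO_REUSEPORT, SO_KEEPALIVE, FD_CLOEXEC,
  // TCP_NODELAY where applicable, and TCP Fast Open when enabled.
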
  // Put the socket in non-blocking mode
  if (fcntl(fd, F_SETFL, O_NONBLOCK) != 0) {
    folly::throwSystemError(errno,
                            "failed to put socket in non-blocking mode");
  }

  // Set reuseaddr to avoid 2MSL delay on server restart
  int one = 1;
  if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) != 0) {
    // This isn't a fatal error; just log an error message and continue
    LOG(ERROR) << "failed to set SO_REUSEADDR on async server socket " << errno;
  }

  // Set reuseport to support multiple accept threads
  int zero = 0;
  if (reusePortEnabled_ &&
      setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(int)) != 0) {
    LOG(ERROR) << "failed to set SO_REUSEPORT on async server socket "
               << strerror(errno);
#ifdef WIN32
    folly::throwSystemError(errno, "failed to bind to the async server socket");
#else
    SocketAddress address;
    address.setFromLocalAddress(fd);
    folly::throwSystemError(errno,
                            "failed to bind to async server socket: " +
                            address.describe());
#endif
  }

  // Set keepalive as desired
  if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
                 (keepAliveEnabled_) ? &one : &zero, sizeof(int)) != 0) {
    LOG(ERROR) << "failed to set SO_KEEPALIVE on async server socket: " <<
            strerror(errno);
  }

  // Setup FD_CLOEXEC flag
  if (closeOnExec_ &&
      (-1 == folly::setCloseOnExec(fd, closeOnExec_))) {
    LOG(ERROR) << "failed to set FD_CLOEXEC on async server socket: " <<
            strerror(errno);
  }

  // Set TCP_NODELAY if available.  Skipped when TCP_NOPUSH is defined
  // (a Mac OS X hack); see
  // http://lists.danga.com/pipermail/memcached/2005-March/001240.html
#ifndef TCP_NOPUSH
  if (family != AF_UNIX) {
    if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) != 0) {
      // This isn't a fatal error; just log an error message and continue
      LOG(ERROR) << "failed to set TCP_NODELAY on async server socket: " <<
              strerror(errno);
    }
  }
#endif

#if FOLLY_ALLOW_TFO
  if (tfo_ && detail::tfo_enable(fd, tfoMaxQueueSize_) != 0) {
    // This isn't a fatal error; just log an error message and continue
    LOG(WARNING) << "failed to set TCP_FASTOPEN on async server socket: "
                 << folly::errnoStr(errno);
  }
#endif

  if (shutdownSocketSet_) {
    shutdownSocketSet_->add(fd);
  }
}

void AsyncServerSocket::handlerReady(uint16_t /* events */,
                                     int fd,
                                     sa_family_t addressFamily) noexcept {
  assert(!callbacks_.empty());
  DestructorGuard dg(this);

  // Only accept up to maxAcceptAtOnce_ connections at a time,
  // to avoid starving other I/O handlers using this EventBase.
  for (uint32_t n = 0; n < maxAcceptAtOnce_; ++n) {
    SocketAddress address;

    sockaddr_storage addrStorage;
    socklen_t addrLen = sizeof(addrStorage);
    sockaddr* saddr = reinterpret_cast<sockaddr*>(&addrStorage);

    // In some cases, accept() doesn't seem to update these correctly.
    saddr->sa_family = addressFamily;
    if (addressFamily == AF_UNIX) {
      addrLen = sizeof(struct sockaddr_un);
    }

    // Accept a new client socket
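    // accept4() with SOCK_NONBLOCK marks the new fd non-blocking atomically;
    // when accept4() is unavailable we fall back to plain accept() and set
    // O_NONBLOCK on the fd further below.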
#ifdef SOCK_NONBLOCK
    int clientSocket = accept4(fd, saddr, &addrLen, SOCK_NONBLOCK);
#else
    int clientSocket = accept(fd, saddr, &addrLen);
#endif

    address.setFromSockaddr(saddr, addrLen);

    if (clientSocket >= 0 && connectionEventCallback_) {
      connectionEventCallback_->onConnectionAccepted(clientSocket, address);
    }

    std::chrono::time_point<std::chrono::steady_clock> nowMs =
      std::chrono::steady_clock::now();
    auto timeSinceLastAccept = std::max<int64_t>(
      0,
      nowMs.time_since_epoch().count() -
      lastAccepTimestamp_.time_since_epoch().count());
    lastAccepTimestamp_ = nowMs;
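    // Simple load shedding: while acceptRate_ < 1 it decays back toward 1
    // based on the time since the last accept and acceptRateAdjustSpeed_,
    // and each incoming connection is dropped with probability
    // (1 - acceptRate_).  dispatchSocket() lowers acceptRate_ when callback
    // queues fill up.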
    if (acceptRate_ < 1) {
      acceptRate_ *= 1 + acceptRateAdjustSpeed_ * timeSinceLastAccept;
      if (acceptRate_ >= 1) {
        acceptRate_ = 1;
      } else if (rand() > acceptRate_ * RAND_MAX) {
        ++numDroppedConnections_;
        if (clientSocket >= 0) {
          closeNoInt(clientSocket);
          if (connectionEventCallback_) {
            connectionEventCallback_->onConnectionDropped(clientSocket,
                                                          address);
          }
        }
        continue;
      }
    }

    if (clientSocket < 0) {
      if (errno == EAGAIN) {
        // No more sockets to accept right now.
        // Check for this code first, since it's the most common.
        return;
      } else if (errno == EMFILE || errno == ENFILE) {
        // We're out of file descriptors.  Perhaps we're accepting connections
        // too quickly. Pause accepting briefly to back off and give the server
        // a chance to recover.
        LOG(ERROR) << "accept failed: out of file descriptors; entering accept "
                "back-off state";
        enterBackoff();

        // Dispatch the error message
        dispatchError("accept() failed", errno);
      } else {
        dispatchError("accept() failed", errno);
      }
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionAcceptError(errno);
      }
      return;
    }

#ifndef SOCK_NONBLOCK
    // Explicitly set the new connection to non-blocking mode
    if (fcntl(clientSocket, F_SETFL, O_NONBLOCK) != 0) {
      closeNoInt(clientSocket);
      dispatchError("failed to set accepted socket to non-blocking mode",
                    errno);
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionDropped(clientSocket, address);
      }
      return;
    }
#endif

    // Inform the callback about the new connection
    dispatchSocket(clientSocket, std::move(address));

    // If we aren't accepting any more, break out of the loop
    if (!accepting_ || callbacks_.empty()) {
      break;
    }
  }
}

void AsyncServerSocket::dispatchSocket(int socket,
                                        SocketAddress&& address) {
  uint32_t startingIndex = callbackIndex_;
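  // Callbacks are tried in round-robin order starting at callbackIndex_;
  // startingIndex lets us detect when every callback's queue has been tried
  // and found full.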

  // Short circuit if the callback is in the primary EventBase thread

  CallbackInfo *info = nextCallback();
  if (info->eventBase == nullptr) {
    info->callback->connectionAccepted(socket, address);
    return;
  }

  const SocketAddress addr(address);
  // Create a message to send over the notification queue
  QueueMessage msg;
  msg.type = MessageType::MSG_NEW_CONN;
  msg.address = std::move(address);
  msg.fd = socket;

  // Loop until we find a free queue to write to
  while (true) {
    if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionEnqueuedForAcceptorCallback(
            socket,
            addr);
      }
      // Success! return.
      return;
    }

    // We couldn't add to queue.  Fall through to below

    ++numDroppedConnections_;
    if (acceptRateAdjustSpeed_ > 0) {
      // aggressively decrease accept rate when in trouble
      static const double kAcceptRateDecreaseSpeed = 0.1;
      acceptRate_ *= 1 - kAcceptRateDecreaseSpeed;
    }


    if (callbackIndex_ == startingIndex) {
      // The notification queue was full
      // We can't really do anything at this point other than close the socket.
      //
      // This should only happen if a user's service is behaving extremely
      // badly and none of the EventBase threads are looping fast enough to
      // process the incoming connections.  If the service is overloaded, it
      // should use pauseAccepting() to temporarily back off accepting new
      // connections, before they reach the point where their threads can't
      // even accept new messages.
      LOG(ERROR) << "failed to dispatch newly accepted socket:"
                 << " all accept callback queues are full";
      closeNoInt(socket);
      if (connectionEventCallback_) {
        connectionEventCallback_->onConnectionDropped(socket, addr);
      }
      return;
    }

    info = nextCallback();
  }
}

void AsyncServerSocket::dispatchError(const char *msgstr, int errnoValue) {
  uint32_t startingIndex = callbackIndex_;
  CallbackInfo *info = nextCallback();

  // Create a message to send over the notification queue
  QueueMessage msg;
  msg.type = MessageType::MSG_ERROR;
  msg.err = errnoValue;
  msg.msg = std::move(msgstr);

  while (true) {
    // Short circuit if the callback is in the primary EventBase thread
    if (info->eventBase == nullptr) {
      std::runtime_error ex(
        std::string(msgstr) +  folly::to<std::string>(errnoValue));
      info->callback->acceptError(ex);
      return;
    }

    if (info->consumer->getQueue()->tryPutMessageNoThrow(std::move(msg))) {
      return;
    }
    // Fall through and try another callback

    if (callbackIndex_ == startingIndex) {
      // The notification queues for all of the callbacks were full.
      // We can't really do anything at this point.
      LOG(ERROR) << "failed to dispatch accept error: all accept callback "
        "queues are full: error msg:  " <<
        msg.msg.c_str() << errnoValue;
      return;
    }
    info = nextCallback();
  }
}

void AsyncServerSocket::enterBackoff() {
  // If this is the first time we have entered the backoff state,
  // allocate backoffTimeout_.
  if (backoffTimeout_ == nullptr) {
    try {
      backoffTimeout_ = new BackoffTimeout(this);
    } catch (const std::bad_alloc&) {
      // Man, we couldn't even allocate the timer to re-enable accepts.
      // We must be in pretty bad shape.  Don't pause accepting for now,
      // since we won't be able to re-enable ourselves later.
      LOG(ERROR) << "failed to allocate AsyncServerSocket backoff"
                 << " timer; unable to temporarly pause accepting";
      if (connectionEventCallback_) {
        connectionEventCallback_->onBackoffError();
      }
      return;
    }
  }

  // For now, we simply pause accepting for 1 second.
  //
  // We could add some smarter backoff calculation here in the future.  (e.g.,
  // start sleeping for longer if we keep hitting the backoff frequently.)
  // Typically the user needs to figure out why the server is overloaded and
  // fix it in some other way, though.  The backoff timer is just a simple
  // mechanism to try and give the connection processing code a little bit of
  // breathing room to catch up, and to avoid just spinning and failing to
  // accept over and over again.
  const uint32_t timeoutMS = 1000;
  if (!backoffTimeout_->scheduleTimeout(timeoutMS)) {
    LOG(ERROR) << "failed to schedule AsyncServerSocket backoff timer;"
               << "unable to temporarly pause accepting";
    if (connectionEventCallback_) {
      connectionEventCallback_->onBackoffError();
    }
    return;
  }

  // The backoff timer is scheduled to re-enable accepts.
  // Go ahead and disable accepts for now.  We leave accepting_ set to true,
  // since that tracks the desired state requested by the user.
  for (auto& handler : sockets_) {
    handler.unregisterHandler();
  }
  if (connectionEventCallback_) {
    connectionEventCallback_->onBackoffStarted();
  }
}

void AsyncServerSocket::backoffTimeoutExpired() {
  // accepting_ should still be true.
  // If pauseAccepting() was called while in the backoff state it will cancel
  // the backoff timeout.
  assert(accepting_);
  // We can't be detached from the EventBase without being paused
  assert(eventBase_ != nullptr && eventBase_->isInEventBaseThread());

  // If all of the callbacks were removed, we shouldn't re-enable accepts
  if (callbacks_.empty()) {
    if (connectionEventCallback_) {
      connectionEventCallback_->onBackoffEnded();
    }
    return;
  }

  // Register the handler.
  for (auto& handler : sockets_) {
    if (!handler.registerHandler(
          EventHandler::READ | EventHandler::PERSIST)) {
      // We're hosed.  We could just re-schedule backoffTimeout_ to
      // re-try again after a little bit.  However, we don't want to
      // loop retrying forever if we can't re-enable accepts.  Just
      // abort the entire program in this state; things are really bad
      // and restarting the entire server is probably the best remedy.
      LOG(ERROR)
        << "failed to re-enable AsyncServerSocket accepts after backoff; "
        << "crashing now";
      abort();
    }
  }
  if (connectionEventCallback_) {
    connectionEventCallback_->onBackoffEnded();
  }
}



} // folly