Code review comment for lp://staging/~gary/juju-gui/ci2

Gary Poster (gary) wrote :

Reviewers: mp+156210_code.launchpad.net,

Message:
Please take a look.

Description:
Tweak CI tests

Retry unit tests more, and only specify an instance type in canonistack.

https://code.launchpad.net/~gary/juju-gui/ci2/+merge/156210

(do not edit description out of merge proposal)

Please review this at https://codereview.appspot.com/8083044/

Affected files:
   A [revision details]
   M lib/deploy_charm_for_testing.py
   M test/test_charm_running.py

Index: [revision details]
=== added file '[revision details]'
--- [revision details] 2012-01-01 00:00:00 +0000
+++ [revision details] 2012-01-01 00:00:00 +0000
@@ -0,0 +1,2 @@
+Old revision: <email address hidden>
+New revision: <email address hidden>

Index: lib/deploy_charm_for_testing.py
=== modified file 'lib/deploy_charm_for_testing.py'
--- lib/deploy_charm_for_testing.py 2013-03-28 15:40:14 +0000
+++ lib/deploy_charm_for_testing.py 2013-03-29 18:16:49 +0000
@@ -26,6 +26,7 @@
          print(err.output)
          raise

+
  # We found that the juju status call fails intermittently in
  # canonistack. This works around that particular fragility.
  @retry(subprocess.CalledProcessError, tries=3)
@@ -99,13 +100,21 @@
          wait_for_machine=wait_for_machine):
      """Deploy the Juju GUI service and wait for it to become available."""
      args = options()
+    # Get the IP that we should associate with the charm. This is only used
+    # by Canonistack, and is effectively our flag for that environment.
+    instance_ip = os.environ.get("JUJU_INSTANCE_IP")
      try:
          print('Bootstrapping...')
-        juju('bootstrap --environment juju-gui-testing '
-             '--constraints instance-type=m1.small')
+        if instance_ip:
+            # We are deploying in Canonistack.
             # The default m1.tiny was so small that the improv server would
-            # sometimes fail to start. The m1.medium is more difficult to obtain
-            # on canonistack than m1.small, so m1.small seems to be "just right"
+            # sometimes fail to start. The m1.medium is more difficult to
+            # obtain on canonistack than m1.small, so m1.small seems to be
+            # "just right."
+            juju('bootstrap --environment juju-gui-testing '
+                 '--constraints instance-type=m1.small')
+        else:
+            juju('bootstrap --environment juju-gui-testing')
          print('Deploying service...')
          options = {'serve-tests': True, 'staging': True, 'secure': False,
                     'juju-gui-source': args.origin}
@@ -116,8 +125,6 @@

          print('Waiting for service to start...')
          wait_for_machine()
-        # Fetches the instance ID from the testing instances to apply an IP to
-        instance_ip = os.environ.get("JUJU_INSTANCE_IP")
          if instance_ip:
              print('Assigning JUJU_INSTANCE_IP %s' % instance_ip)
              instance_id = subprocess.check_output(
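
For context, the first hunk's comment refers to a retry decorator used as
@retry(subprocess.CalledProcessError, tries=3). A minimal sketch of what a
decorator with that signature could look like follows; the branch's actual
helper may differ, and the fixed one-second pause between attempts is an
assumption, not taken from the diff:

import functools
import time


def retry(exception, tries=3, delay=1):
    """Retry the decorated function while `exception` keeps being raised."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(tries):
                try:
                    return func(*args, **kwargs)
                except exception:
                    if attempt == tries - 1:
                        # Out of attempts: let the error propagate.
                        raise
                    time.sleep(delay)
        return wrapper
    return decorator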

Index: test/test_charm_running.py
=== modified file 'test/test_charm_running.py'
--- test/test_charm_running.py 2013-03-27 22:52:28 +0000
+++ test/test_charm_running.py 2013-03-29 16:45:51 +0000
@@ -43,25 +43,30 @@
              # reasons yet to be determined.
              if stats.get('duration') or stats.get('end') or stats['failures']:
                  return stats['tests'], stats['failures']
+
          def run_tests():
              self.wait_for_css_selector('#mocha-stats')
              try:
                  total, failures = self.wait_for(
-                    tests_completed, 'Unable to complete test run.', timeout=90)
+                    tests_completed, 'Unable to complete test run.',
+                    timeout=90)
              except exceptions.TimeoutException:
                  print(self.driver.execute_script('return testRunner.stats;'))
                  raise
              return total, failures
          self.load('/test/')
-        total, failures = run_tests()
-        if failures:
-            # We sometimes see initial failures and we don't know why :-(.
-            # Reload and retry.
-            print(
-                '{} failure(s) running {} tests. Retrying.'.format(
-                    failures, total))
-            self.driver.refresh()
+        for i in range(5):
             total, failures = run_tests()
+            if failures and i < 4 and total < 100:
+                # XXX bug 1161937 gary 2013-03-29
+                # We sometimes see initial failures and we don't know why :-(.
+                # Reload and retry.
+                print(
+                    '{} failure(s) running {} tests. Retrying.'.format(
+                        failures, total))
+                self.driver.refresh()
+            else:
+                break
          if failures:
              msg = '{} failure(s) running {} tests.'.format(failures, total)
              self.fail(msg)
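
The self.wait_for call above polls a condition function (here tests_completed,
which returns a result tuple once the suite finishes and None otherwise) until
it yields a value or the timeout elapses. A rough sketch of that assumed
contract; the real helper lives in the test harness and may differ, the
half-second poll interval is an assumption, and the selenium import simply
mirrors the exceptions.TimeoutException reference in the diff:

import time

from selenium.common import exceptions


def wait_for(condition, error_message, timeout=90, poll=0.5):
    """Poll `condition` until it returns something other than None."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result is not None:
            return result
        time.sleep(poll)
    # The condition never came true in time; raise the harness-style error.
    raise exceptions.TimeoutException(error_message)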
