def set_keyring(keyring):
    global _keyring_backend
    if not isinstance(keyring, backend.KeyringBackend):
        raise TypeError("The keyring must be a subclass of KeyringBackend")
    _keyring_backend = keyring
Set current keyring backend.
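A usage sketch, assuming the `keyring` package, whose backends subclass `keyring.backend.KeyringBackend`; the in-memory backend below is a hypothetical illustration, not part of the library:

    import keyring
    import keyring.backend

    class InMemoryKeyring(keyring.backend.KeyringBackend):
        # hypothetical backend for illustration: stores secrets in a dict
        priority = 1

        def __init__(self):
            super().__init__()
            self._store = {}

        def set_password(self, service, username, password):
            self._store[(service, username)] = password

        def get_password(self, service, username):
            return self._store.get((service, username))

        def delete_password(self, service, username):
            self._store.pop((service, username), None)

    keyring.set_keyring(InMemoryKeyring())  # passes the isinstance check above
    keyring.set_password("svc", "alice", "s3cret")
    assert keyring.get_password("svc", "alice") == "s3cret"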
def run_program(prog_list, debug, shell):
    try:
        if not shell:
            process = Popen(prog_list, stdout=PIPE, stderr=PIPE)
            stdout, stderr = process.communicate()
            retcode = process.returncode
            if debug >= 1:
                print("Program : ", " ".join(prog_list))
                ...

Run a program and check its return code. Note that some commands don't work well with Popen, so if this function is specifically called with 'shell=True' it will run the old 'os.system' instead, in which case there is no program output.
def fetch_repo_creation_date(self):
    gh = self.github
    user = self.options.user
    repo = self.options.project
    rc, data = gh.repos[user][repo].get()
    if rc == 200:
        return REPO_CREATED_TAG_NAME, data["created_at"]
    else:
        self.raise_GitHubError(rc, data, gh...

Get the creation date of the repository from GitHub.

:rtype: str, str
:return: special tag name, creation date as ISO date string
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100):
    if states is not None:
        self.skeleton.set_body_states(states)
    for frame_no, frame in enumerate(angles):
        if frame_no < start:
            continue
        if frame_no >= end:
            ...

Follow a set of angle data, yielding dynamic joint torques.

Parameters
----------
angles : ndarray (num-frames x num-dofs)
    Follow angle data provided by this array of angle values.
start : int, optional
    Start following angle data after this frame. Defaults to the...
def recursive_glob(base_directory, regex=''):
    files = glob(op.join(base_directory, regex))
    for path, dirlist, filelist in os.walk(base_directory):
        for dir_name in dirlist:
            files.extend(glob(op.join(path, dir_name, regex)))
    return files

Uses glob to find all files or folders that match the regex starting from the base_directory.

Parameters
----------
base_directory: str
regex: str

Returns
-------
files: list
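A usage sketch; note the parameter is a glob pattern despite its name, and the body assumes `from glob import glob` and `import os.path as op`:

    import os
    import os.path as op
    from glob import glob

    # collect every .csv at any depth under /data (the os.walk covers nested dirs)
    csv_files = recursive_glob('/data', '*.csv')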
def process_point_value(cls, command_type, command, index, op_type):
    _log.debug('Processing received point value for index {}: {}'.format(index, command))

A PointValue was received from the Master. Process its payload.

:param command_type: (string) Either 'Select' or 'Operate'.
:param command: A ControlRelayOutputBlock or else a wrapped data value (AnalogOutputInt16, etc.).
:param index: (integer) DNP3 index of the payload's data definition.
...
def add_color_to_scheme(scheme, name, foreground, background, palette_colors):
    if foreground is None and background is None:
        return scheme
    new_scheme = []
    for item in scheme:
        if item[0] == name:
            if foreground is None:
                foreground = item[1]
            if background...
Add foreground and background colours to a color scheme
def array_2d_from_array_1d(self, array_1d):
    return mapping_util.map_masked_1d_array_to_2d_array_from_array_1d_shape_and_one_to_two(
        array_1d=array_1d, shape=self.mask.shape,
        one_to_two=self.mask.masked_grid_index_to_pixel)

Map a 1D array the same dimension as the grid to its original masked 2D array.

Parameters
----------
array_1d : ndarray
    The 1D array which is mapped to its masked 2D array.
def connect():
    ftp_class = ftplib.FTP if not SSL else ftplib.FTP_TLS
    ftp = ftp_class(timeout=TIMEOUT)
    ftp.connect(HOST, PORT)
    ftp.login(USER, PASSWORD)
    if SSL:
        ftp.prot_p()
    return ftp
Connect to FTP server, login and return an ftplib.FTP instance.
def to_table(components, topo_info):
    inputs, outputs = defaultdict(list), defaultdict(list)
    for ctype, component in components.items():
        if ctype == 'bolts':
            for component_name, component_info in component.items():
                for input_stream in component_info['inputs']:
                    input_name = input_stream['c...

Normalize raw logical plan info to a table.
def pickle_load(path, compression=False):
    if compression:
        with zipfile.ZipFile(path, "r", compression=zipfile.ZIP_DEFLATED) as myzip:
            with myzip.open("data") as f:
                return pickle.load(f)
    else:
        with open(path, "rb") as f:
            return pickle.load(f)

Unpickle a possibly compressed pickle.

Parameters
----------
path: str
    path to the output file
compression: bool
    if true, assumes that the pickle was compressed when created and attempts decompression.

Returns
-------
obj: object
    the unpickled object
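A round-trip sketch; the writing side is an assumption inferred from the reader, namely that the compressed pickle lives in a zip member named "data":

    import pickle
    import zipfile

    # write a compressed pickle the way pickle_load expects to read it
    with zipfile.ZipFile("obj.zip", "w", compression=zipfile.ZIP_DEFLATED) as z:
        with z.open("data", "w") as f:
            pickle.dump({"a": 1}, f)

    obj = pickle_load("obj.zip", compression=True)
    assert obj == {"a": 1}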
def get_stdout(self, workflow_id, task_id):
    url = '%(wf_url)s/%(wf_id)s/tasks/%(task_id)s/stdout' % {
        'wf_url': self.workflows_url,
        'wf_id': workflow_id,
        'task_id': task_id
    }
    r = self.gbdx_connection.get(url)
    r.raise_for_status()
    return r.text

Get stdout for a particular task.

Args:
    workflow_id (str): Workflow id.
    task_id (str): Task id.

Returns:
    Stdout of the task (string).
def _getMatchingRowsNoRetries(self, tableInfo, conn, fieldsToMatch,
                              selectFieldNames, maxRows=None):
    assert fieldsToMatch, repr(fieldsToMatch)
    assert all(k in tableInfo.dbFieldNames
               for k in fieldsToMatch.iterkeys()), repr(fieldsToMatch)
    assert selectFieldNames...

Return a sequence of matching rows with the requested field values from a table, or an empty sequence if nothing matched.

tableInfo:       Table information: a ClientJobsDAO._TableInfoBase instance
conn:            Owned connection acquired from ConnectionFactory.get()
fieldsToMatch:   Dictionary of inter...
def clone(self):
    result = copy.copy(self)
    result._compound_mfrs = copy.deepcopy(self._compound_mfrs)
    return result

Create a complete copy of the stream.

:returns: A new MaterialStream object.
def makedirs(path):
    if not os.path.isdir(path):
        os.makedirs(path)
    return path
Create directories if they do not exist, otherwise do nothing. Return path for convenience
def get_element_mfr(self, element):
    result = 0.0
    for compound in self.material.compounds:
        formula = compound.split('[')[0]
        result += self.get_compound_mfr(compound) * \
            stoich.element_mass_fraction(formula, element)
    return result

Determine the mass flow rate of the specified element in the stream.

:returns: Mass flow rate. [kg/h]
def characters(quantity=10):
    line = map(_to_lower_alpha_only,
               ''.join(random.sample(get_dictionary('lorem_ipsum'), quantity)))
    return ''.join(line)[:quantity]
Return random characters.
def create_log_config(verbose, quiet):
    if verbose and quiet:
        raise ValueError(
            "Supplying both --quiet and --verbose makes no sense."
        )
    elif verbose:
        level = logging.DEBUG
    elif quiet:
        level = logging.ERROR
    else:
        level = logging.INFO
    logger_cfg = {"...
We use logging's levels as an easy-to-use verbosity controller.
def _is_ndb(self):
    if isinstance(self._model, type):
        if _NDB_MODEL is not None and issubclass(self._model, _NDB_MODEL):
            return True
        elif issubclass(self._model, db.Model):
            return False
    raise TypeError(
        'Model class not an NDB or DB mode...

Determine whether the model of the instance is an NDB model.

Returns:
    Boolean indicating whether or not the model is an NDB or DB model.
def convert_clip(params, w_name, scope_name, inputs, layers, weights, names):
    print('Converting clip ...')
    if params['min'] == 0:
        print("using ReLU({0})".format(params['max']))
        layer = keras.layers.ReLU(max_value=params['max'])
    else:
        def target_layer(x, vmin=params['min'], vmax=param...

Convert clip operation.

Args:
    params: dictionary with layer parameters
    w_name: name prefix in state_dict
    scope_name: pytorch scope name
    inputs: pytorch node inputs
    layers: dictionary with keras tensors
    weights: pytorch state_dict
    names: use short names for ke...
def add_newlines(f, output, char):
    line_count = get_line_count(f)
    f = open(f, 'r+')
    output = open(output, 'r+')
    for line in range(line_count):
        string = f.readline()
        string = re.sub(char, char + '\n', string)
        output.write(string)
    # close both handles so the written data is flushed
    f.close()
    output.close()

Adds line breaks after every occurrence of a given character in a file.

Args:
    f: string, path to input file.
    output: string, path to output file.
    char: string, character after which line breaks are inserted.

Returns:
    None.
# the @contextmanager decorator is required for this generator to work as
# the context manager the docstring describes
@contextlib.contextmanager
def set_training(model, mode):
    if mode is None:
        yield
        return
    old_mode = model.training
    if old_mode != mode:
        model.train(mode)
    try:
        yield
    finally:
        if old_mode != mode:
            model.train(old_mode)
A context manager to temporarily set the training mode of 'model' to 'mode', resetting it when we exit the with-block. A no-op if mode is None.
def fit(self, X, y):
    word_vector_transformer = WordVectorTransformer(padding='max')
    X = word_vector_transformer.fit_transform(X)
    X = LongTensor(X)
    self.word_vector_transformer = word_vector_transformer

    y_transformer = LabelEncoder()
    y = y_transformer.fit_transform(y)
    ...

Fit KimCNNClassifier according to X, y.

Parameters
----------
X : list of string
    each item is a raw text
y : list of string
    each item is a label
def copy_file(self, path, prefixed_path, source_storage):
    if prefixed_path in self.copied_files:
        return self.log("Skipping '%s' (already copied earlier)" % path)
    if not self.delete_file(path, prefixed_path, source_storage):
        return
    source_path = source_storage.path(path)
    ...
Attempt to copy ``path`` with storage
def calculate_average_scores_on_graph(
    graph: BELGraph,
    key: Optional[str] = None,
    tag: Optional[str] = None,
    default_score: Optional[float] = None,
    runs: Optional[int] = None,
    use_tqdm: bool = False,
):
    subgraphs = generate_bioprocess_mechanisms(graph, key=key)
    s...

Calculate the scores over all biological processes in the sub-graph.

As an implementation, it simply computes the sub-graphs then calls
:func:`calculate_average_scores_on_subgraphs` as described in that function's
documentation.

:param graph: A BEL graph with heats already on the nodes
:param key: The...
def _readxml(self):
    block = re.sub(r'<(/?)s>', r'&lt;\1s&gt;', self._readblock())
    try:
        xml = XML(block)
    except ParseError:
        xml = None
    return xml

Read a block and return the result as XML.

:return: block as xml
:rtype: xml.etree.ElementTree
def merge(*args):
    ret = []
    for arg in args:
        if isinstance(arg, (list, tuple)):
            ret += list(arg)
        else:
            ret.append(arg)
    return ret
Implements the 'merge' operator for merging lists.
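For example:

    merge(1, [2, 3], (4, 5), 'ab')  # -> [1, 2, 3, 4, 5, 'ab']

Strings are treated as scalars rather than sequences, since only lists and tuples are expanded.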
def update_query_params(uri, params):
    parts = urllib.parse.urlparse(uri)
    query_params = parse_unique_urlencoded(parts.query)
    query_params.update(params)
    new_query = urllib.parse.urlencode(query_params)
    new_parts = parts._replace(query=new_query)
    return urllib.parse.urlunparse(new_parts)

Updates a URI with new query parameters.

If a given key from ``params`` is repeated in the ``uri``, then the URI will
be considered invalid and an error will occur.

If the URI is valid, then each value from ``params`` will replace the
corresponding value in the query parameters (if it exists).
...
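A usage sketch, assuming `parse_unique_urlencoded` is this module's helper that rejects repeated keys:

    update_query_params('http://example.com/path?a=1&b=2', {'b': '3', 'c': '4'})
    # -> 'http://example.com/path?a=1&b=3&c=4' (parameter order may vary)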
def posteriori_covariance(self):
    K = GLMM.covariance(self)
    tau = self._ep._posterior.tau
    return pinv(pinv(K) + diag(1 / tau))

Covariance of the estimated posterior.
def image(self, well_row, well_column, field_row, field_column):
    return next((i for i in self.images
                 if attribute(i, 'u') == well_column and
                    attribute(i, 'v') == well_row and
                    attribute(i, 'x') == field_column and
                    attrib...

Get path of specified image.

Parameters
----------
well_row : int
    Starts at 0. Same as --U in files.
well_column : int
    Starts at 0. Same as --V in files.
field_row : int
    Starts at 0. Same as --Y in files.
field_column : int
    ...
def j2(x):
    to_return = 2. / (x + 1e-15) * j1(x) - j0(x)
    to_return[x == 0] = 0
    return to_return
A fast j2 defined in terms of other special functions
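This is the standard Bessel recurrence J2(x) = (2/x)·J1(x) − J0(x), with 1e-15 added to dodge division by zero. A quick check against SciPy, assuming `j0` and `j1` here are `scipy.special.j0`/`j1`:

    import numpy as np
    from scipy.special import j0, j1, jn

    x = np.linspace(0.5, 10.0, 20)
    assert np.allclose(2. / (x + 1e-15) * j1(x) - j0(x), jn(2, x))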
def getInstanceJstack(self, topology_info, instance_id):
    pid_response = yield getInstancePid(topology_info, instance_id)
    try:
        http_client = tornado.httpclient.AsyncHTTPClient()
        pid_json = json.loads(pid_response)
        pid = pid_json['stdout'].strip()
        if pid == '':
            raise Exception('Fai...
Fetches Instance jstack from heron-shell.
def tsms(when, tz=None):
    if not when:
        return None
    when = totz(when, tz)
    return calendar.timegm(when.timetuple()) * 1000 + int(round(when.microsecond / 1000.0))
Return a Unix timestamp in milliseconds for the provided datetime. The `totz` function is called on the datetime to convert it to the provided timezone. It will be converted to UTC if no timezone is provided.
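For example, assuming `totz` leaves an already-UTC datetime unchanged:

    from datetime import datetime, timezone

    tsms(datetime(2021, 1, 1, microsecond=500000, tzinfo=timezone.utc))
    # -> 1609459200500 (seconds component times 1000, plus rounded milliseconds)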
def bitsToString(arr):
    s = array('c', '.' * len(arr))
    for i in xrange(len(arr)):
        if arr[i] == 1:
            s[i] = '*'
    return s
Returns a string representing a numpy array of 0's and 1's
def assign_params(sess, params, network):
    ops = []
    for idx, param in enumerate(params):
        ops.append(network.all_params[idx].assign(param))
    if sess is not None:
        sess.run(ops)
    return ops

Assign the given parameters to the TensorLayer network.

Parameters
----------
sess : Session
    TensorFlow Session.
params : list of array
    A list of parameters (array) in order.
network : :class:`Layer`
    The network to be assigned.

Returns
-------
list of operation...
def count(self):
    xml = get_changeset(self.id)
    actions = [action.tag for action in xml.getchildren()]
    self.create = actions.count('create')
    self.modify = actions.count('modify')
    self.delete = actions.count('delete')
    self.verify_editor()
    try:
        if (self.cr...

Count the number of elements created, modified and deleted by the changeset, and analyse whether it is a possible import, mass modification or mass deletion.
def bounding_box(self):
    min_x, min_y, max_x, max_y = zip(*list(self.walk_rows(
        lambda row: row.bounding_box)))
    return min(min_x), min(min_y), max(max_x), max(max_y)

The minimum and maximum bounds of this layout.

:return: ``(min_x, min_y, max_x, max_y)`` the bounding box of this layout
:rtype: tuple
def swish(x, name='swish'):
    with tf.name_scope(name):
        x = tf.nn.sigmoid(x) * x
    return x

Swish function.

See `Swish: a Self-Gated Activation Function <https://arxiv.org/abs/1710.05941>`__.

Parameters
----------
x : Tensor
    input.
name: str
    function name (optional).

Returns
-------
Tensor
    A ``Tensor`` in the same type as ``x``.
def transform_title(self, content_metadata_item):
    title_with_locales = []
    for locale in self.enterprise_configuration.get_locales():
        title_with_locales.append({
            'locale': locale,
            'value': content_metadata_item.get('title', '')
        })
    return tit...
Return the title of the content item.
def annotate_metadata_data(repo, task, patterns=["*"], size=0):
    mgr = plugins_get_mgr()
    keys = mgr.search('representation')['representation']
    representations = [mgr.get_by_key('representation', k) for k in keys]
    matching_files = repo.find_matching_files(patterns)
    package = repo.package
    rootdir ...
Update metadata with the content of the files
def ast2str(expr, level=0, names=None):
    if isinstance(expr, Expression):
        return ast2str(expr.body, 0, names) \
            if hasattr(expr, "body") else ""
    elif isinstance(expr, Name):
        return names.get(expr.id, expr.id) if names else expr.id
    elif isinstance(expr, BoolOp):
        op = expr.o...

Convert compiled ast to gene_reaction_rule str.

Parameters
----------
expr : str
    string for a gene reaction rule, e.g. "a and b"
level : int
    internal use only
names : dict
    Dict where each element id is a gene identifier and the value is the
    gene name. Use this to get a ru...
def multitype_sort(a):
    types = defaultdict(list)
    numbers = {int, float, complex}
    for x in a:
        t = type(x)
        if t in numbers:
            types['number'].append(x)
        else:
            types[t].append(x)
    for t in types:
        types[t] = np.sort(types[t])
    return list(chain(*(types[...

Sort elements of multiple types.

`a` is assumed to contain elements of different types, such that a plain
sort would raise a `TypeError`.

Parameters
----------
a : array-like
    Array of items to be sorted

Returns
-------
out : list
    Items sorted within their type groups.
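A quick sketch of the grouping behavior (note `np.sort` may hand back NumPy scalars rather than the original Python types):

    multitype_sort([3, 'b', 1.5, 'a', 2])
    # numbers sort together, strings sort together; groups keep first-seen order:
    # -> [1.5, 2.0, 3.0, 'a', 'b']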
def dist(self, src, tar):
    if src == tar:
        return 0.0
    src_comp = self._rle.encode(self._bwt.encode(src))
    tar_comp = self._rle.encode(self._bwt.encode(tar))
    concat_comp = self._rle.encode(self._bwt.encode(src + tar))
    concat_comp2 = self._rle.encode(self._bwt.encode(tar +...

Return the NCD between two strings using BWT plus RLE.

Parameters
----------
src : str
    Source string for comparison
tar : str
    Target string for comparison

Returns
-------
float
    Compression distance

Examples
--...
def _ensure_managed_repos_dir_exists():
    if not os.path.exists(constants.REPOS_DIR):
        os.makedirs(constants.REPOS_DIR)
Our exports file will be invalid if this folder doesn't exist, and the NFS server will not run correctly.
def convert_lsstdoc_tex(
        content, to_fmt, deparagraph=False,
        mathjax=False, smart=True, extra_args=None):
    augmented_content = '\n'.join((LSSTDOC_MACROS, content))
    return convert_text(
        augmented_content, 'latex', to_fmt,
        deparagraph=deparagraph, mathjax=mathjax, smart=smar...

Convert lsstdoc-class LaTeX to another markup format.

This function is a thin wrapper around `convert_text` that automatically
includes common lsstdoc LaTeX macros.

Parameters
----------
content : `str`
    Original content.
to_fmt : `str`
    Output format for the content (see https:...
async def manage(self):
    cm = _ContextManager(self.database)
    if isinstance(self.database.obj, AIODatabase):
        cm.connection = await self.database.async_connect()
    else:
        cm.connection = self.database.connect()
    return cm
Manage a database connection.
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression):
    f = feature_extractor.FeatureExtractor()
    f.initialize_dictionaries(essays)
    train_feats = f.gen_feats(essays)
    set_score = numpy.asarray(essays._score, dtype=numpy.int)
    if len(util_functions.f7(list(se...

Feed in an essay set to get feature vector and classifier.

essays must be an essay set object. additional array is an optional argument
that can specify a numpy array of values to add in.

Returns a trained FeatureExtractor object and a trained classifier.
def pick_coda_from_decimal(decimal):
    decimal = Decimal(decimal)
    __, digits, exp = decimal.as_tuple()
    if exp < 0:
        return DIGIT_CODAS[digits[-1]]
    __, digits, exp = decimal.normalize().as_tuple()
    index = bisect_right(EXP_INDICES, exp) - 1
    if index < 0:
        return DIGIT_CODAS[digits[-1]]...
Picks only a coda from a decimal.
def tool_factory(clsname, name, driver, base=GromacsCommand):
    clsdict = {
        'command_name': name,
        'driver': driver,
        '__doc__': property(base._get_gmx_docs)
    }
    return type(clsname, (base,), clsdict)
Factory for GromacsCommand derived types.
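A sketch of how the factory might be invoked; the tool and driver names below are hypothetical examples, not taken from the source:

    # build a GromacsCommand subclass wrapping a driver-prefixed tool,
    # e.g. something invoked as "gmx grompp"
    Grompp = tool_factory('Grompp', 'grompp', 'gmx')
    grompp = Grompp()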
def get_extr_lics_xref(self, extr_lic):
    xrefs = list(self.graph.triples((extr_lic, RDFS.seeAlso, None)))
    return map(lambda xref_triple: xref_triple[2], xrefs)
Return a list of cross references.
def norm_remote_path(path):
    path = os.path.normpath(path)
    if path.startswith(os.path.sep):
        return path[1:]
    else:
        return path
Normalize `path`. All remote paths are absolute.
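For example (POSIX separators assumed):

    norm_remote_path('/foo//bar/../baz')  # -> 'foo/baz'
    norm_remote_path('foo/baz')           # -> 'foo/baz'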
def save_component(self, component_name, save_path):
    component = self.get_component(component_name=component_name)
    self._validate_savable(component=component, component_name=component_name)
    return component.save(sess=self.session, save_path=save_path)

Saves a component of this model to the designated location.

Args:
    component_name: The component to save.
    save_path: The location to save to.

Returns:
    Checkpoint path where the component was saved.
def dsa_sign(private_key, data, hash_algorithm):
    if private_key.algorithm != 'dsa':
        raise ValueError('The key specified is not a DSA private key')
    return _sign(private_key, data, hash_algorithm)

Generates a DSA signature.

:param private_key:
    The PrivateKey to generate the signature with

:param data:
    A byte string of the data the signature is for

:param hash_algorithm:
    A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512"

:raises:
    ValueError - when ...
def generateStats(filename, maxSamples=None):
    statsCollectorMapping = {'float': FloatStatsCollector,
                             'int': IntStatsCollector,
                             'string': StringStatsCollector,
                             'datetime': DateTimeStatsCollector,
                             'b...

Collect statistics for each of the fields in the user input data file and
return a stats dict object.

Parameters:
------------------------------------------------------------------------------
filename:    The path and name of the data file.
maxSamples:  Upper bound on the number of rows to...
def add_condor_job(self, token, batchmaketaskid, jobdefinitionfilename,
                   outputfilename, errorfilename, logfilename, postfilename):
    parameters = dict()
    parameters['token'] = token
    parameters['batchmaketaskid'] = batchmaketaskid
    parameters['job...

Add a Condor DAG job to the Condor DAG associated with this Batchmake task.

:param token: A valid token for the user in question.
:type token: string
:param batchmaketaskid: id of the Batchmake task for this DAG
:type batchmaketaskid: int | long
:param jobdefinitionfilena...
def export(self, model_name, export_folder):
    for transformer in self.transformers:
        if isinstance(transformer, MultiLabelBinarizer):
            joblib.dump(transformer,
                        join(export_folder, "label.transformer.bin"),
                        protocol=2)
    i...

Export model and transformers to export_folder.

Parameters
----------
model_name: string
    name of model to export
export_folder: string
    folder to store exported model and transformers
def clear(self, *args, **kwargs):
    super(Deposit, self).clear(*args, **kwargs)

Clear only drafts.

Status required: ``'draft'``. Meta information inside `_deposit` is preserved.
def pitch_contour(annotation, **kwargs):
    ax = kwargs.pop('ax', None)
    ax = mir_eval.display.__get_axes(ax=ax)[0]
    times, values = annotation.to_interval_values()
    indices = np.unique([v['index'] for v in values])
    for idx in indices:
        rows = [i for (i, v) in enumerate(values) if v['index'] == idx...
Plotting wrapper for pitch contours
def encode(self, o):
    if isinstance(o, basestring):
        if isinstance(o, str):
            _encoding = self.encoding
            if (_encoding is not None and not (_encoding == 'utf-8')):
                o = o.decode(_encoding)
        if self.ensure_ascii:
            ...

Return a JSON string representation of a Python data structure.

>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
def child(self, offset256):
    a = bytes(self.pubkey) + offset256
    s = hashlib.sha256(a).digest()
    return self.derive_from_seed(s)
Derive new private key from this key and a sha256 "offset"
def pop_data(self):
    data = self.data
    self.data = SortedKeyList(key=self._key)
    return data

Replace this observation's data with a fresh container.

Returns
-------
annotation_data : SortedKeyList
    The original annotation data container
def _isCheckpointDir(checkpointDir):
    lastSegment = os.path.split(checkpointDir)[1]
    if lastSegment[0] == '.':
        return False
    if not checkpointDir.endswith(g_defaultCheckpointExtension):
        return False
    if not os.path.isdir(checkpointDir):
        return False
    return True
Return true iff checkpointDir appears to be a checkpoint directory.
def from_participantid(participant_id):
    return user.UserID(
        chat_id=participant_id.chat_id,
        gaia_id=participant_id.gaia_id
    )
Convert hangouts_pb2.ParticipantId to UserID.
def draw_freehand(self):
    if _ctx._ns["mousedown"]:
        x, y = mouse()
        if self.show_grid:
            x, y = self.grid.snap(x, y)
        if self.freehand_move == True:
            cmd = MOVETO
            self.freehand_move = False
        else:
            cmd = LINE...
Freehand sketching.
def get_user_by_email(self, email):
    parameters = dict()
    parameters['email'] = email
    response = self.request('midas.user.get', parameters)
    return response

Get a user by the email of that user.

:param email: The email of the desired user.
:type email: string
:returns: The user requested.
:rtype: dict
def _stack_positions(positions, pos_in_dollars=True):
    if pos_in_dollars:
        positions = get_percent_alloc(positions)
    positions = positions.drop('cash', axis='columns')
    positions = positions.stack()
    positions.index = positions.index.set_names(['dt', 'ticker'])
    return positions

Convert positions to percentages if necessary, and change them to long format.

Parameters
----------
positions: pd.DataFrame
    Daily holdings (in dollars or percentages), indexed by date.
    Will be converted to percentages if positions are in dollars.
    Short positions show up as cash...
def app_class():
    try:
        pkg_resources.get_distribution('invenio-files-rest')
        from invenio_files_rest.app import Flask as FlaskBase
    except pkg_resources.DistributionNotFound:
        from flask import Flask as FlaskBase

    class Request(TrustedHostsMixin, FlaskBase.request_class):
        pass
    ...
Create Flask application class. Invenio-Files-REST needs to patch the Werkzeug form parsing in order to support streaming large file uploads. This is done by subclassing the Flask application class.
def write_json(self, fh, pretty=True):
    sjson = json.JSONEncoder().encode(self.json())
    if pretty:
        json.dump(json.loads(sjson), fh, sort_keys=True, indent=4)
    else:
        json.dump(json.loads(sjson), fh)
    return

Write composite object to file handle in JSON format.

Args:
    fh (file): File handle to write to.
    pretty (bool): Sort keys and indent in output.
def graph_background(s):
    if s.background is None:
        s._ctx.background(None)
    else:
        s._ctx.background(s.background)
    if s.depth:
        try:
            clr = colors.color(s.background).darker(0.2)
            p = s._ctx.rect(0, 0, s._ctx.WIDTH, s._ctx.HEIGHT, draw=False)
            colors.gr...
Graph background color.
def md_dimension_info(name, node):
    def _get_value(child_name):
        return getattr(node.find(child_name), 'text', None)

    resolution = _get_value('resolution')
    defaultValue = node.find("defaultValue")
    strategy = defaultValue.find("strategy") if defaultValue is not None else None
    strategy = strategy...
Extract metadata Dimension Info from an xml node
def fulladder_gate(variables, vartype=dimod.BINARY, name='FULL_ADDER'):
    variables = tuple(variables)
    if vartype is dimod.BINARY:
        configs = frozenset([(0, 0, 0, 0, 0),
                             (0, 0, 1, 1, 0),
                             (0, 1, 0, 1, 0),
                             (0, 1, 1, 0, 1),...

Full adder.

Args:
    variables (list):
        Variable labels for the full-adder gate as `[in1, in2, in3, sum, carry]`,
        where `in1, in2, in3` are inputs to be added and `sum` and `carry`
        the resultant outputs.
    vartype (Vartype, optional, default='BINARY'):
        Variable type. Accepted ...
def plot_grid(grid_arcsec, array, units, kpc_per_arcsec, pointsize, zoom_offset_arcsec):
    if grid_arcsec is not None:
        if zoom_offset_arcsec is not None:
            grid_arcsec -= zoom_offset_arcsec
        grid_units = convert_grid_units(grid_arcsec=grid_arcsec, array=array, units=units,
                                        ...

Plot a grid of points over the array of data on the figure.

Parameters
----------
grid_arcsec : ndarray or data.array.grids.RegularGrid
    A grid of (y,x) coordinates in arc-seconds which may be plotted over the array.
array : data.array.scaled_array.ScaledArray
    The 2D array of data...
def _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger, e):
    msg = StringIO.StringIO()
    print >>msg, "Exception occurred while running model %s: %r (%s)" % (
        modelID, e, type(e))
    traceback.print_exc(None, msg)

    completionReason = jobsDAO.CMPL_REASON_ERROR...

Perform standard handling of an exception that occurs while running a model.

Parameters:
-------------------------------------------------------------------------
jobID:        ID for this hypersearch job in the jobs table
modelID:      model ID
jobsDAO:      ClientJobsDAO instance
...
def included(self, path, is_dir=False):
    inclusive = None
    for pattern in self.patterns:
        if pattern.is_dir == is_dir and pattern.matches(path):
            inclusive = pattern.inclusive
    return inclusive

Check patterns in order; the last match that includes or excludes `path` wins. Return `None` if undecided.
def upload(self, filename, location=''):
    current_folder = self._ftp.pwd()
    self.mkdir(location)
    self.cd(location)
    fl = open(filename, 'rb')
    filename = filename.split('/')[-1]
    self._ftp.storbinary('STOR %s' % filename, fl)
    fl.close()
    self.cd(current_folder)

Upload a file to the desired location on the server.

:param filename: the name of the file to be uploaded.
:type filename: string
:param location: the directory in which the file will be stored.
:type location: string
def get_configured_dns():
    ips = []
    try:
        output = subprocess.check_output(['nmcli', 'device', 'show'])
        output = output.decode('utf-8')
        for line in output.split('\n'):
            if 'DNS' in line:
                pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
                for hit in r...

Returns the configured DNS servers with the use of nmcli.
def calc_intent(self, query):
    matches = self.calc_intents(query)
    if len(matches) == 0:
        return MatchData('', '')
    best_match = max(matches, key=lambda x: x.conf)
    best_matches = (match for match in matches if match.conf == best_match.conf)
    return min(best_matches, key=la...

Tests all the intents against the query and returns match data of the best intent.

Args:
    query (str): Input sentence to test against intents

Returns:
    MatchData: Best intent match
def FlagCxx14Features(filename, clean_lines, linenum, error):
    line = clean_lines.elided[linenum]
    # reconstructed include matcher: captures the header name from an #include line
    include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)

    # Flag unapproved C++14 headers.
    if include and include.group(1) in ('scoped_allocator', 'shared_mutex'):
        error(filename, linenum, 'build/c++14', 5,
              ('<%s> is an unapproved C++14 header.') % include.group(1))

Flag those C++14 features that we restrict.

Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
def _cleanup(self):
    self.exit()
    workspace = osp.join(os.getcwd(), 'octave-workspace')
    if osp.exists(workspace):
        os.remove(workspace)
Clean up resources used by the session.
def _GetNextLogCountPerToken(token):
    global _log_counter_per_token
    _log_counter_per_token[token] = 1 + _log_counter_per_token.get(token, -1)
    return _log_counter_per_token[token]

Wrapper for _log_counter_per_token.

Args:
    token: The token for which to look up the count.

Returns:
    The number of times this function has been called with *token* as an
    argument (starting at 0).
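A quick sketch of the per-token counting, assuming `_log_counter_per_token` starts as an empty dict:

    _GetNextLogCountPerToken('spam')  # -> 0
    _GetNextLogCountPerToken('spam')  # -> 1
    _GetNextLogCountPerToken('eggs')  # -> 0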
def write(self, chunk, serialize=False, format=None):
    self.require_not_closed()
    if chunk is None:
        return
    if serialize or format is not None:
        self.serialize(chunk, format=format)
        return
    if type(chunk) is six.binary_type:
        self._length += len(...

Writes the given chunk to the output buffer.

@param[in] chunk
    Either a byte array, a unicode string, or a generator. If `chunk`
    is a generator then calling `self.write(<generator>)` is equivalent to:
    @code
        for x in <generator>:
            ...
def grompp_qtot(*args, **kwargs):
    qtot_pattern = re.compile(r'System has non-zero total charge: *(?P<qtot>[-+]?\d*\.\d+([eE][-+]\d+)?)')
    kwargs['stdout'] = False
    kwargs['stderr'] = False
    rc, output, error = grompp_warnonly(*args, **kwargs)
    gmxoutput = "\n".join([x for x in [output, error] if x is not...

Run ``gromacs.grompp`` and return the total charge of the system.

:Arguments:
    The arguments are the ones one would pass to :func:`gromacs.grompp`.
:Returns:
    The total charge as reported

Some things to keep in mind:

* The stdout output of grompp is only shown when an error occurs. For ...
def parse_query_string(self, query):
    if not query:
        return None
    if query[0] == '(':
        index = self.find_closing_braces(query)
        if index != len(query) - 1:
            raise Exception("Invalid syntax")
        else:
            return self.parse_query_string(query[1:-1])
    start_index = query.find("(")
    ...

Returns a parse tree for the query, each node of which is a subclass of Operator. This is both a lexical and a syntax analysis step.
def solve_prop(self, goal, reset_method=True):
    if self.Tmin is None or self.Tmax is None:
        raise Exception('Both a minimum and a maximum value are not present, indicating there is not enough data for temperature dependency.')
    if not self.test_property_validity(goal):
        rais...

Method to solve for the temperature at which a property is at a specified
value. `T_dependent_property` is used to calculate the value of the property
as a function of temperature; if `reset_method` is True, the best method is
used at each temperature as the solver seeks a solution.
...
def _make_prefixed(self, name, is_element, declared_prefixes, declarations):
    namespace, name = self._split_qname(name, is_element)
    if namespace is None:
        prefix = None
    elif namespace in declared_prefixes:
        prefix = declared_prefixes[namespace]
    elif namespace in self...

Return namespace-prefixed tag or attribute name.

Add an appropriate declaration to `declarations` when necessary.

If no prefix for an element namespace is defined, make the element's
namespace the default (no prefix). For attributes, make up a prefix in
such a case.

:Parameters:
    ...
def processed_shape(self, shape):
    for processor in self.preprocessors:
        shape = processor.processed_shape(shape=shape)
    return shape

Shape of preprocessed state given original shape.

Args:
    shape: original state shape

Returns:
    processed state shape
def transaction(func):
    @wraps(func)
    def wrapper(cls, *args, **kwargs):
        with (yield from cls.get_cursor(_CursorType.NAMEDTUPLE)) as c:
            try:
                yield from c.execute('BEGIN')
                result = (yield from func(cls, c, *args, **kwargs))
            except Exception:
                ...

Provides a transacted cursor which will run in autocommit=false mode. For any
exception the transaction will be rolled back.

Requires that the function being decorated is an instance of a class or object
that yields a cursor from a get_cursor(cursor_type=CursorType.NAMEDTUPLE)
coroutine or provides such an ...
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
    if is_forward_declaration:
        return len(nesting_state.stack) >= 1 and (
            isinstance(nesting_state.stack[-1], _NamespaceInfo))
    return (len(nesting_state.stack) > 1 and
            nesting_state.stack[-1].check_namespace_indentation and
            isi...

Checks that the new block is directly in a namespace.

Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.

Returns:
    Whether or not the new block is directly in a namespace.
def getAccountFromPrivateKey(self, wif):
    pub = self.publickey_from_wif(wif)
    return self.getAccountFromPublicKey(pub)
Obtain account name from private key
def _make_session(connection: Optional[str] = None) -> Session:
    if connection is None:
        connection = get_global_connection()
    engine = create_engine(connection)
    create_all(engine)
    session_cls = sessionmaker(bind=engine)
    session = session_cls()
    return session
Make a session.
def less_or_equal(a, b, *args):
    return (
        less(a, b) or soft_equals(a, b)
    ) and (not args or less_or_equal(b, *args))

Implements the '<=' operator with JS-style type coercion.
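The extra args chain comparisons, so `less_or_equal(a, b, c)` means a <= b <= c:

    less_or_equal(1, 2)     # True
    less_or_equal(2, 2)     # True (soft equality)
    less_or_equal(1, 2, 3)  # True:  1 <= 2 <= 3
    less_or_equal(1, 3, 2)  # False: 3 <= 2 fails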
def find_external_compartment(model):
    if model.boundary:
        counts = pd.Series(tuple(r.compartments)[0] for r in model.boundary)
        most = counts.value_counts()
        most = most.index[most == most.max()].to_series()
    else:
        most = None
    like_external = compartment_shortlist["e"] + ["e"]
    ...

Find the external compartment in the model.

Uses a simple heuristic where the external compartment should be the one
with the most exchange reactions.

Arguments
---------
model : cobra.Model
    A cobra model.

Returns
-------
str
    The putative external compartment.
def _nginx_location_spec(port_spec, bridge_ip):
    location_string_spec = "\t \t location / { \n"
    for location_setting in ['proxy_http_version 1.1;',
                             'proxy_set_header Upgrade $http_upgrade;',
                             'proxy_set_header Connection "upgrade";',
                             ...

This will output the nginx location config string for a specific port spec.
def update_extend(dst, src):
    for k, v in src.items():
        existing = dst.setdefault(k, [])
        for x in v:
            if x not in existing:
                existing.append(x)

Update the `dst` with the `src`, extending values where lists.

Primarily useful for integrating results from `get_library_config`.
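A quick sketch of the de-duplicating merge, using hypothetical build-config values:

    dst = {'libraries': ['m'], 'include_dirs': ['/usr/include']}
    src = {'libraries': ['m', 'z'], 'define_macros': [('NDEBUG', '1')]}
    update_extend(dst, src)
    # dst == {'libraries': ['m', 'z'],
    #         'include_dirs': ['/usr/include'],
    #         'define_macros': [('NDEBUG', '1')]}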
def _fixupRandomEncoderParams(params, minVal, maxVal, minResolution):
    encodersDict = (
        params["modelConfig"]["modelParams"]["sensorParams"]["encoders"]
    )
    for encoder in encodersDict.itervalues():
        if encoder is not None:
            if encoder["type"] == "RandomDistributedScalarEncoder":
                resolution = max...
Given model params, figure out the correct parameters for the RandomDistributed encoder. Modifies params in place.
def set_stream(self, stream):
    _unused = stream
    if self.joined and self.handler:
        self.handler.user_left(self.me, None)
    self.joined = False

Called when current stream changes.

Mark the room not joined and inform `self.handler` that it was left.

:Parameters:
    - `stream`: the new stream.
:Types:
    - `stream`: `pyxmpp.stream.Stream`
def set_state(_id, body):
    url = DEVICE_URL % _id
    if "mode" in body:
        url = MODES_URL % _id
    arequest = requests.put(url, headers=HEADERS, data=json.dumps(body))
    status_code = str(arequest.status_code)
    if status_code != '202':
        _LOGGER.error("State not accepte...

Set a device's state.
def upload_s3(cfg, path_to_zip_file, *use_s3):
    print('Uploading your new Lambda function')
    profile_name = cfg.get('profile')
    aws_access_key_id = cfg.get('aws_access_key_id')
    aws_secret_access_key = cfg.get('aws_secret_access_key')
    client = get_client(
        's3', profile_name, aws_access_key_id, a...
Upload a function to AWS S3.
def log_file(self, url=None):
    if url is None:
        url = self.url
    f = re.sub("file://", "", url)
    try:
        with open(f, "a") as of:
            of.write(str(self.store.get_json_tuples(True)))
    except IOError as e:
        print(e)
        print("Could not write t...
Write to a local log file
def build_dag(data, samples):
    snames = [i.name for i in samples]
    dag = nx.DiGraph()
    joborder = JOBORDER[data.paramsdict["assembly_method"]]
    for sname in snames:
        for func in joborder:
            dag.add_node("{}-{}-{}".format(func, 0, sname))
            for chunk in xrange(10):
                dag.add_...

Build a directed acyclic graph describing the jobs to be run, in order.